From 2e648dc69e1162d0479517d3d31e2571701cb116 Mon Sep 17 00:00:00 2001 From: machineuser Date: Wed, 19 Apr 2023 10:42:40 +0000 Subject: [PATCH] =?UTF-8?q?=F0=9F=94=96=20@hugginface/inference=20v2.0.0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 2 +- docs/_toctree.yml | 70 +- docs/index.md | 84 +- docs/inference/README.md | 63 +- docs/inference/classes/HfInference.md | 658 +++++++++------- docs/inference/classes/HfInferenceEndpoint.md | 649 +++++++++++++++ .../enums/TextGenerationStreamFinishReason.md | 37 - docs/inference/interfaces/Args.md | 11 - .../AudioClassificationOutputValue.md | 25 + .../AudioClassificationReturnValue.md | 25 - .../AutomaticSpeechRecognitionOutput.md | 13 + .../AutomaticSpeechRecognitionReturn.md | 13 - docs/inference/interfaces/BaseArgs.md | 27 + .../interfaces/ConversationalOutput.md | 38 + .../interfaces/ConversationalReturn.md | 38 - .../ImageClassificationOutputValue.md | 25 + .../ImageClassificationReturnValue.md | 25 - .../ImageSegmentationOutputValue.md | 37 + .../ImageSegmentationReturnValue.md | 37 - .../inference/interfaces/ImageToTextOutput.md | 13 + .../inference/interfaces/ImageToTextReturn.md | 13 - ...Value.md => ObjectDetectionOutputValue.md} | 8 +- docs/inference/interfaces/Options.md | 10 +- .../interfaces/QuestionAnswerReturn.md | 49 -- .../interfaces/QuestionAnsweringOutput.md | 49 ++ .../interfaces/SummarizationOutput.md | 13 + .../interfaces/SummarizationReturn.md | 13 - .../interfaces/TableQuestionAnswerReturn.md | 49 -- .../TableQuestionAnsweringOutput.md | 49 ++ .../interfaces/TextGenerationOutput.md | 13 + .../interfaces/TextGenerationReturn.md | 13 - .../TextGenerationStreamBestOfSequence.md | 14 +- .../interfaces/TextGenerationStreamDetails.md | 14 +- .../interfaces/TextGenerationStreamOutput.md | 39 + .../TextGenerationStreamPrefillToken.md | 6 +- .../interfaces/TextGenerationStreamReturn.md | 39 - .../interfaces/TextGenerationStreamToken.md | 8 +- 
.../TokenClassificationOutputValue.md | 61 ++ .../TokenClassificationReturnValue.md | 61 -- .../inference/interfaces/TranslationOutput.md | 13 + .../inference/interfaces/TranslationReturn.md | 13 - .../ZeroShotClassificationOutputValue.md | 31 + .../ZeroShotClassificationReturnValue.md | 31 - docs/inference/modules.md | 737 +++++++++++++++--- packages/inference/package.json | 2 +- 45 files changed, 2321 insertions(+), 917 deletions(-) create mode 100644 docs/inference/classes/HfInferenceEndpoint.md delete mode 100644 docs/inference/enums/TextGenerationStreamFinishReason.md delete mode 100644 docs/inference/interfaces/Args.md create mode 100644 docs/inference/interfaces/AudioClassificationOutputValue.md delete mode 100644 docs/inference/interfaces/AudioClassificationReturnValue.md create mode 100644 docs/inference/interfaces/AutomaticSpeechRecognitionOutput.md delete mode 100644 docs/inference/interfaces/AutomaticSpeechRecognitionReturn.md create mode 100644 docs/inference/interfaces/BaseArgs.md create mode 100644 docs/inference/interfaces/ConversationalOutput.md delete mode 100644 docs/inference/interfaces/ConversationalReturn.md create mode 100644 docs/inference/interfaces/ImageClassificationOutputValue.md delete mode 100644 docs/inference/interfaces/ImageClassificationReturnValue.md create mode 100644 docs/inference/interfaces/ImageSegmentationOutputValue.md delete mode 100644 docs/inference/interfaces/ImageSegmentationReturnValue.md create mode 100644 docs/inference/interfaces/ImageToTextOutput.md delete mode 100644 docs/inference/interfaces/ImageToTextReturn.md rename docs/inference/interfaces/{ObjectDetectionReturnValue.md => ObjectDetectionOutputValue.md} (55%) delete mode 100644 docs/inference/interfaces/QuestionAnswerReturn.md create mode 100644 docs/inference/interfaces/QuestionAnsweringOutput.md create mode 100644 docs/inference/interfaces/SummarizationOutput.md delete mode 100644 docs/inference/interfaces/SummarizationReturn.md delete mode 100644 
docs/inference/interfaces/TableQuestionAnswerReturn.md create mode 100644 docs/inference/interfaces/TableQuestionAnsweringOutput.md create mode 100644 docs/inference/interfaces/TextGenerationOutput.md delete mode 100644 docs/inference/interfaces/TextGenerationReturn.md create mode 100644 docs/inference/interfaces/TextGenerationStreamOutput.md delete mode 100644 docs/inference/interfaces/TextGenerationStreamReturn.md create mode 100644 docs/inference/interfaces/TokenClassificationOutputValue.md delete mode 100644 docs/inference/interfaces/TokenClassificationReturnValue.md create mode 100644 docs/inference/interfaces/TranslationOutput.md delete mode 100644 docs/inference/interfaces/TranslationReturn.md create mode 100644 docs/inference/interfaces/ZeroShotClassificationOutputValue.md delete mode 100644 docs/inference/interfaces/ZeroShotClassificationReturnValue.md diff --git a/README.md b/README.md index 29de12d35..1e3a4c3ed 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ You can run our packages with vanilla JS, without any bundler, by using a CDN or ```html ``` diff --git a/docs/_toctree.yml b/docs/_toctree.yml index 8fab522b5..4af601117 100644 --- a/docs/_toctree.yml +++ b/docs/_toctree.yml @@ -62,51 +62,49 @@ sections: - title: HfInference local: inference/classes/HfInference - - title: Enums - sections: - - title: TextGenerationStreamFinishReason - local: inference/enums/TextGenerationStreamFinishReason + - title: HfInferenceEndpoint + local: inference/classes/HfInferenceEndpoint - title: Interfaces sections: - - title: Args - local: inference/interfaces/Args - - title: AudioClassificationReturnValue - local: inference/interfaces/AudioClassificationReturnValue - - title: AutomaticSpeechRecognitionReturn - local: inference/interfaces/AutomaticSpeechRecognitionReturn - - title: ConversationalReturn - local: inference/interfaces/ConversationalReturn - - title: ImageClassificationReturnValue - local: inference/interfaces/ImageClassificationReturnValue - - 
title: ImageSegmentationReturnValue - local: inference/interfaces/ImageSegmentationReturnValue - - title: ImageToTextReturn - local: inference/interfaces/ImageToTextReturn - - title: ObjectDetectionReturnValue - local: inference/interfaces/ObjectDetectionReturnValue + - title: AudioClassificationOutputValue + local: inference/interfaces/AudioClassificationOutputValue + - title: AutomaticSpeechRecognitionOutput + local: inference/interfaces/AutomaticSpeechRecognitionOutput + - title: BaseArgs + local: inference/interfaces/BaseArgs + - title: ConversationalOutput + local: inference/interfaces/ConversationalOutput + - title: ImageClassificationOutputValue + local: inference/interfaces/ImageClassificationOutputValue + - title: ImageSegmentationOutputValue + local: inference/interfaces/ImageSegmentationOutputValue + - title: ImageToTextOutput + local: inference/interfaces/ImageToTextOutput + - title: ObjectDetectionOutputValue + local: inference/interfaces/ObjectDetectionOutputValue - title: Options local: inference/interfaces/Options - - title: QuestionAnswerReturn - local: inference/interfaces/QuestionAnswerReturn - - title: SummarizationReturn - local: inference/interfaces/SummarizationReturn - - title: TableQuestionAnswerReturn - local: inference/interfaces/TableQuestionAnswerReturn - - title: TextGenerationReturn - local: inference/interfaces/TextGenerationReturn + - title: QuestionAnsweringOutput + local: inference/interfaces/QuestionAnsweringOutput + - title: SummarizationOutput + local: inference/interfaces/SummarizationOutput + - title: TableQuestionAnsweringOutput + local: inference/interfaces/TableQuestionAnsweringOutput + - title: TextGenerationOutput + local: inference/interfaces/TextGenerationOutput - title: TextGenerationStreamBestOfSequence local: inference/interfaces/TextGenerationStreamBestOfSequence - title: TextGenerationStreamDetails local: inference/interfaces/TextGenerationStreamDetails + - title: TextGenerationStreamOutput + local: 
inference/interfaces/TextGenerationStreamOutput - title: TextGenerationStreamPrefillToken local: inference/interfaces/TextGenerationStreamPrefillToken - - title: TextGenerationStreamReturn - local: inference/interfaces/TextGenerationStreamReturn - title: TextGenerationStreamToken local: inference/interfaces/TextGenerationStreamToken - - title: TokenClassificationReturnValue - local: inference/interfaces/TokenClassificationReturnValue - - title: TranslationReturn - local: inference/interfaces/TranslationReturn - - title: ZeroShotClassificationReturnValue - local: inference/interfaces/ZeroShotClassificationReturnValue + - title: TokenClassificationOutputValue + local: inference/interfaces/TokenClassificationOutputValue + - title: TranslationOutput + local: inference/interfaces/TranslationOutput + - title: ZeroShotClassificationOutputValue + local: inference/interfaces/ZeroShotClassificationOutputValue diff --git a/docs/index.md b/docs/index.md index 0b6de6174..c974452ea 100644 --- a/docs/index.md +++ b/docs/index.md @@ -9,12 +9,28 @@

+```ts +await inference.translation({ + model: 't5-base', + inputs: 'My name is Wolfgang and I live in Berlin' +}) + +await inference.textToImage({ + model: 'stabilityai/stable-diffusion-2', + inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]', + parameters: { + negative_prompt: 'blurry', + } +}) +``` + # Hugging Face JS libraries This is a collection of JS libraries to interact with the Hugging Face API, with TS types included. +- [@huggingface/inference](inference/README): Use the Inference API to make calls to 100,000+ Machine Learning models, or your own [inference endpoints](https://hf.co/docs/inference-endpoints/)! - [@huggingface/hub](hub/README): Interact with huggingface.co to create or delete repos and commit / download files -- [@huggingface/inference](inference/README): Use the Inference API to make calls to 100,000+ Machine Learning models! + With more to come, like `@huggingface/endpoints` to manage your HF Endpoints! @@ -29,15 +45,15 @@ The libraries are still very young, please help us by opening issues! To install via NPM, you can download the libraries as needed: ```bash -npm install @huggingface/hub npm install @huggingface/inference +npm install @huggingface/hub ``` Then import the libraries in your code: ```ts -import { createRepo, commit, deleteRepo, listFiles } from "@huggingface/hub"; import { HfInference } from "@huggingface/inference"; +import { createRepo, commit, deleteRepo, listFiles } from "@huggingface/hub"; import type { RepoId, Credentials } from "@huggingface/hub"; ``` @@ -48,18 +64,52 @@ You can run our packages with vanilla JS, without any bundler, by using a CDN or ```html ``` -## Usage example +## Usage examples + +Get your HF access token in your [account settings](https://huggingface.co/settings/tokens). 
+ +### @huggingface/inference examples ```ts -import { createRepo, uploadFile, deleteFiles } from "@huggingface/hub"; import { HfInference } from "@huggingface/inference"; -// use an access token from your free account +const HF_ACCESS_TOKEN = "hf_..."; + +const inference = new HfInference(HF_ACCESS_TOKEN); + +await inference.translation({ + model: 't5-base', + inputs: 'My name is Wolfgang and I live in Berlin' +}) + +await inference.textToImage({ + model: 'stabilityai/stable-diffusion-2', + inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]', + parameters: { + negative_prompt: 'blurry', + } +}) + +await inference.imageToText({ + data: await (await fetch('https://picsum.photos/300/300')).blob(), + model: 'nlpconnect/vit-gpt2-image-captioning', +}) + +// Using your own inference endpoint: https://hf.co/docs/inference-endpoints/ +const gpt2 = inference.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2'); +const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the universe is'}); +``` + +### @huggingface/hub examples + +```ts +import { createRepo, uploadFile, deleteFiles } from "@huggingface/hub"; + const HF_ACCESS_TOKEN = "hf_..."; await createRepo({ @@ -82,26 +132,6 @@ await deleteFiles({ credentials: {accessToken: HF_ACCESS_TOKEN}, paths: ["README.md", ".gitattributes"] }); - -const inference = new HfInference(HF_ACCESS_TOKEN); - -await inference.translation({ - model: 't5-base', - inputs: 'My name is Wolfgang and I live in Berlin' -}) - -await inference.textToImage({ - inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]', - model: 'stabilityai/stable-diffusion-2', - parameters: { - negative_prompt: 'blurry', - } -}) - -await inference.imageToText({ - data: await (await fetch('https://picsum.photos/300/300')).blob(), - model: 'nlpconnect/vit-gpt2-image-captioning', -}) ``` There are more features of course, 
check each library's README! diff --git a/docs/inference/README.md b/docs/inference/README.md index f4c16d830..468572485 100644 --- a/docs/inference/README.md +++ b/docs/inference/README.md @@ -1,6 +1,8 @@ # 🤗 Hugging Face Inference API -A Typescript powered wrapper for the Hugging Face Inference API. Learn more about the Inference API at [Hugging Face](https://huggingface.co/docs/api-inference/index). +A Typescript powered wrapper for the Hugging Face Inference API. Learn more about the Inference API at [Hugging Face](https://huggingface.co/docs/api-inference/index). It also works with [Inference Endpoints](https://huggingface.co/docs/inference-endpoints/index). + +You can also try out a live [interactive notebook](https://observablehq.com/@huggingface/hello-huggingface-js-inference) or see some demos on [hf.co/huggingfacejs](https://huggingface.co/huggingfacejs). ## Install @@ -14,16 +16,16 @@ pnpm add @huggingface/inference ## Usage -❗**Important note:** Using an API key is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your API key for **free**. +❗**Important note:** Using an access token is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**. -Your API key should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the API key. +Your access token should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the access token. 
### Basic examples ```typescript import { HfInference } from '@huggingface/inference' -const hf = new HfInference('your api key') +const hf = new HfInference('your access token') // Natural Language @@ -41,7 +43,7 @@ await hf.summarization({ } }) -await hf.questionAnswer({ +await hf.questionAnswering({ model: 'deepset/roberta-base-squad2', inputs: { question: 'What is the capital of France?', @@ -49,7 +51,7 @@ await hf.questionAnswer({ } }) -await hf.tableQuestionAnswer({ +await hf.tableQuestionAnswering({ model: 'google/tapas-base-finetuned-wtq', inputs: { query: 'How many stars does the transformers repository have?', @@ -107,7 +109,7 @@ await hf.conversational({ } }) -await hf.featureExtraction({ +await hf.sentenceSimilarity({ model: 'sentence-transformers/paraphrase-xlm-r-multilingual-v1', inputs: { source_sentence: 'That is a happy person', @@ -119,6 +121,11 @@ await hf.featureExtraction({ } }) +await hf.featureExtraction({ + model: "sentence-transformers/distilbert-base-nli-mean-tokens", + inputs: "That is a happy person", +}); + // Audio await hf.automaticSpeechRecognition({ @@ -160,6 +167,30 @@ await hf.imageToText({ data: readFileSync('test/cats.png'), model: 'nlpconnect/vit-gpt2-image-captioning' }) + +// Custom call, for models with custom parameters / outputs +await hf.request({ + model: 'my-custom-model', + inputs: 'hello world', + parameters: { + custom_param: 'some magic', + } +}) + +// Custom streaming call, for models with custom parameters / outputs +for await (const output of hf.streamingRequest({ + model: 'my-custom-model', + inputs: 'hello world', + parameters: { + custom_param: 'some magic', + } +})) { + ... 
+} + +// Using your own inference endpoint: https://hf.co/docs/inference-endpoints/ +const gpt2 = hf.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2'); +const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the universe is'}); ``` ## Supported Tasks @@ -179,6 +210,7 @@ await hf.imageToText({ - [x] Zero-shot classification - [x] Conversational - [x] Feature extraction +- [x] Sentence Similarity ### Audio @@ -193,6 +225,23 @@ await hf.imageToText({ - [x] Text to image - [x] Image to text +## Tree-shaking + +You can import the functions you need directly from the module, rather than using the `HfInference` class: + +```ts +import {textGeneration} from "@huggingface/inference"; + +await textGeneration({ + accessToken: "hf_...", + model: "model_or_endpoint", + inputs: ..., + parameters: ... +}) +``` + +This will enable tree-shaking by your bundler. + ## Running tests ```console diff --git a/docs/inference/classes/HfInference.md b/docs/inference/classes/HfInference.md index ecd5a87fa..160e1f3bc 100644 --- a/docs/inference/classes/HfInference.md +++ b/docs/inference/classes/HfInference.md @@ -1,574 +1,710 @@ # Class: HfInference +## Hierarchy + +- `TaskWithNoAccessToken` + + ↳ **`HfInference`** + ## Constructors ### constructor -• **new HfInference**(`apiKey?`, `defaultOptions?`) +• **new HfInference**(`accessToken?`, `defaultOptions?`) #### Parameters | Name | Type | Default value | | :------ | :------ | :------ | -| `apiKey` | `string` | `""` | +| `accessToken` | `string` | `""` | | `defaultOptions` | [`Options`](../interfaces/Options) | `{}` | #### Defined in -[HfInference.ts:629](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L629) +[HfInference.ts:25](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L25) ## Properties -### apiKey +### accessToken -• `Private` `Readonly` **apiKey**: `string` +• `Private` `Readonly` **accessToken**: 
`string` #### Defined in -[HfInference.ts:626](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L626) +[HfInference.ts:22](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L22) ___ -### defaultOptions - -• `Private` `Readonly` **defaultOptions**: [`Options`](../interfaces/Options) - -#### Defined in - -[HfInference.ts:627](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L627) - -## Methods - ### audioClassification -▸ **audioClassification**(`args`, `options?`): `Promise`<[`AudioClassificationReturn`](../modules#audioclassificationreturn)\> +• **audioClassification**: (`args`: { `data`: `Blob` \| `ArrayBuffer` ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`AudioClassificationReturn`](../modules#audioclassificationreturn)\> -This task reads some audio input and outputs the likelihood of classes. -Recommended model: superb/hubert-large-superb-er +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`AudioClassificationReturn`](../modules#audioclassificationreturn)\> -| Name | Type | -| :------ | :------ | -| `args` | [`AudioClassificationArgs`](../modules#audioclassificationargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary audio data | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. 
| +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns `Promise`<[`AudioClassificationReturn`](../modules#audioclassificationreturn)\> #### Defined in -[HfInference.ts:863](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L863) +[tasks/audio/audioClassification.ts:30](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/audioClassification.ts#L30) ___ ### automaticSpeechRecognition -▸ **automaticSpeechRecognition**(`args`, `options?`): `Promise`<[`AutomaticSpeechRecognitionReturn`](../interfaces/AutomaticSpeechRecognitionReturn)\> +• **automaticSpeechRecognition**: (`args`: { `data`: `Blob` \| `ArrayBuffer` ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`AutomaticSpeechRecognitionOutput`](../interfaces/AutomaticSpeechRecognitionOutput)\> -This task reads some audio input and outputs the said words within the audio files. -Recommended model (english language): facebook/wav2vec2-large-960h-lv60-self +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`AutomaticSpeechRecognitionOutput`](../interfaces/AutomaticSpeechRecognitionOutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`AutomaticSpeechRecognitionArgs`](../modules#automaticspeechrecognitionargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary audio data | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. 
| +| `options?` | [`Options`](../interfaces/Options) | - | -`Promise`<[`AutomaticSpeechRecognitionReturn`](../interfaces/AutomaticSpeechRecognitionReturn)\> +##### Returns + +`Promise`<[`AutomaticSpeechRecognitionOutput`](../interfaces/AutomaticSpeechRecognitionOutput)\> #### Defined in -[HfInference.ts:844](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L844) +[tasks/audio/automaticSpeechRecognition.ts:23](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/automaticSpeechRecognition.ts#L23) ___ ### conversational -▸ **conversational**(`args`, `options?`): `Promise`<[`ConversationalReturn`](../interfaces/ConversationalReturn)\> +• **conversational**: (`args`: { `inputs`: { `generated_responses?`: `string`[] ; `past_user_inputs?`: `string`[] ; `text`: `string` } ; `model`: `string` ; `parameters?`: { `max_length?`: `number` ; `max_time?`: `number` ; `min_length?`: `number` ; `repetition_penalty?`: `number` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ConversationalOutput`](../interfaces/ConversationalOutput)\> -This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large. 
+#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`ConversationalOutput`](../interfaces/ConversationalOutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`ConversationalArgs`](../modules#conversationalargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `Object` | - | +| `args.inputs.generated_responses?` | `string`[] | A list of strings corresponding to the earlier replies from the model. | +| `args.inputs.past_user_inputs?` | `string`[] | A list of strings corresponding to the earlier replies from the user. Should be of the same length of generated_responses. | +| `args.inputs.text` | `string` | The last input from the user in the conversation. | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `args.parameters?` | `Object` | - | +| `args.parameters.max_length?` | `number` | (Default: None). Integer to define the maximum length in tokens of the output summary. | +| `args.parameters.max_time?` | `number` | (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. | +| `args.parameters.min_length?` | `number` | (Default: None). Integer to define the minimum length in tokens of the output summary. | +| `args.parameters.repetition_penalty?` | `number` | (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. | +| `args.parameters.temperature?` | `number` | (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | +| `args.parameters.top_k?` | `number` | (Default: None). 
Integer to define the top tokens considered within the sample operation to create new text. | +| `args.parameters.top_p?` | `number` | (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`ConversationalOutput`](../interfaces/ConversationalOutput)\> -`Promise`<[`ConversationalReturn`](../interfaces/ConversationalReturn)\> +#### Defined in + +[tasks/nlp/conversational.ts:65](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/conversational.ts#L65) + +___ + +### defaultOptions + +• `Private` `Readonly` **defaultOptions**: [`Options`](../interfaces/Options) #### Defined in -[HfInference.ts:814](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L814) +[HfInference.ts:23](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L23) ___ ### featureExtraction -▸ **featureExtraction**(`args`, `options?`): `Promise`<[`FeatureExtractionReturn`](../modules#featureextractionreturn)\> +• **featureExtraction**: (`args`: { `inputs`: `string` \| `string`[] ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`FeatureExtractionOutput`](../modules#featureextractionoutput)\> -This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search. 
+#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`FeatureExtractionOutput`](../modules#featureextractionoutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`FeatureExtractionArgs`](../modules#featureextractionargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` \| `string`[] | The inputs is a string or a list of strings to get the features from. inputs: "That is a happy person", | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `options?` | [`Options`](../interfaces/Options) | - | -`Promise`<[`FeatureExtractionReturn`](../modules#featureextractionreturn)\> +##### Returns + +`Promise`<[`FeatureExtractionOutput`](../modules#featureextractionoutput)\> #### Defined in -[HfInference.ts:835](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L835) +[tasks/nlp/featureExtraction.ts:23](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/featureExtraction.ts#L23) ___ ### fillMask -▸ **fillMask**(`args`, `options?`): `Promise`<[`FillMaskReturn`](../modules#fillmaskreturn)\> +• **fillMask**: (`args`: { `inputs`: `string` ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`FillMaskOutput`](../modules#fillmaskoutput)\> -Tries to fill in a hole with a missing word (token to be precise). That’s the base task for BERT models. 
+#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`FillMaskOutput`](../modules#fillmaskoutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`FillMaskArgs`](../modules#fillmaskargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | - | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns -`Promise`<[`FillMaskReturn`](../modules#fillmaskreturn)\> +`Promise`<[`FillMaskOutput`](../modules#fillmaskoutput)\> #### Defined in -[HfInference.ts:637](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L637) +[tasks/nlp/fillMask.ts:31](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/fillMask.ts#L31) ___ ### imageClassification -▸ **imageClassification**(`args`, `options?`): `Promise`<[`ImageClassificationReturn`](../modules#imageclassificationreturn)\> +• **imageClassification**: (`args`: { `data`: `Blob` \| `ArrayBuffer` ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ImageClassificationOutput`](../modules#imageclassificationoutput)\> -This task reads some image input and outputs the likelihood of classes. 
-Recommended model: google/vit-base-patch16-224 +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`ImageClassificationOutput`](../modules#imageclassificationoutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`ImageClassificationArgs`](../modules#imageclassificationargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary image data | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `options?` | [`Options`](../interfaces/Options) | - | -`Promise`<[`ImageClassificationReturn`](../modules#imageclassificationreturn)\> +##### Returns + +`Promise`<[`ImageClassificationOutput`](../modules#imageclassificationoutput)\> #### Defined in -[HfInference.ts:883](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L883) +[tasks/cv/imageClassification.ts:29](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageClassification.ts#L29) ___ ### imageSegmentation -▸ **imageSegmentation**(`args`, `options?`): `Promise`<[`ImageSegmentationReturn`](../modules#imagesegmentationreturn)\> +• **imageSegmentation**: (`args`: { `data`: `Blob` \| `ArrayBuffer` ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ImageSegmentationOutput`](../modules#imagesegmentationoutput)\> -This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects. 
-Recommended model: facebook/detr-resnet-50-panoptic +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`ImageSegmentationOutput`](../modules#imagesegmentationoutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`ImageSegmentationArgs`](../modules#imagesegmentationargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary image data | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns -`Promise`<[`ImageSegmentationReturn`](../modules#imagesegmentationreturn)\> +`Promise`<[`ImageSegmentationOutput`](../modules#imagesegmentationoutput)\> #### Defined in -[HfInference.ts:931](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L931) +[tasks/cv/imageSegmentation.ts:33](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageSegmentation.ts#L33) ___ ### imageToText -▸ **imageToText**(`args`, `options?`): `Promise`<[`ImageToTextReturn`](../interfaces/ImageToTextReturn)\> +• **imageToText**: (`args`: { `data`: `Blob` \| `ArrayBuffer` ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ImageToTextOutput`](../interfaces/ImageToTextOutput)\> -This task reads some image input and outputs the text caption. 
+#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`ImageToTextOutput`](../interfaces/ImageToTextOutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`ImageToTextArgs`](../modules#imagetotextargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary image data | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns -`Promise`<[`ImageToTextReturn`](../interfaces/ImageToTextReturn)\> +`Promise`<[`ImageToTextOutput`](../interfaces/ImageToTextOutput)\> #### Defined in -[HfInference.ts:966](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L966) +[tasks/cv/imageToText.ts:22](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageToText.ts#L22) ___ -### makeRequestOptions +### objectDetection -▸ `Private` **makeRequestOptions**(`args`, `options?`): `Object` +• **objectDetection**: (`args`: { `data`: `Blob` \| `ArrayBuffer` ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ObjectDetectionOutput`](../modules#objectdetectionoutput)\> -Helper that prepares request arguments +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`ObjectDetectionOutput`](../modules#objectdetectionoutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`Args`](../interfaces/Args) & { `data?`: `ArrayBuffer` \| `Blob` ; `stream?`: `boolean` } | -| `options?` | [`Options`](../interfaces/Options) & { `binary?`: `boolean` ; `blob?`: `boolean` ; `includeCredentials?`: `boolean` } | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| 
`ArrayBuffer` | Binary image data | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `options?` | [`Options`](../interfaces/Options) | - | -`Object` +##### Returns -| Name | Type | -| :------ | :------ | -| `info` | `RequestInit` | -| `mergedOptions` | { `binary?`: `boolean` ; `blob?`: `boolean` ; `dont_load_model?`: `boolean` ; `includeCredentials?`: `boolean` ; `retry_on_error?`: `boolean` ; `use_cache?`: `boolean` ; `use_gpu?`: `boolean` ; `wait_for_model?`: `boolean` } | -| `mergedOptions.binary?` | `boolean` | -| `mergedOptions.blob?` | `boolean` | -| `mergedOptions.dont_load_model?` | `boolean` | -| `mergedOptions.includeCredentials?` | `boolean` | -| `mergedOptions.retry_on_error?` | `boolean` | -| `mergedOptions.use_cache?` | `boolean` | -| `mergedOptions.use_gpu?` | `boolean` | -| `mergedOptions.wait_for_model?` | `boolean` | -| `url` | `string` | +`Promise`<[`ObjectDetectionOutput`](../modules#objectdetectionoutput)\> #### Defined in -[HfInference.ts:978](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L978) +[tasks/cv/objectDetection.ts:39](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/objectDetection.ts#L39) ___ -### objectDetection +### questionAnswering -▸ **objectDetection**(`args`, `options?`): `Promise`<[`ObjectDetectionReturn`](../modules#objectdetectionreturn)\> +• **questionAnswering**: (`args`: { `inputs`: { `context`: `string` ; `question`: `string` } ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`QuestionAnsweringOutput`](../interfaces/QuestionAnsweringOutput)\> -This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects. 
-Recommended model: facebook/detr-resnet-50 +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`QuestionAnsweringOutput`](../interfaces/QuestionAnsweringOutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`ObjectDetectionArgs`](../modules#objectdetectionargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `Object` | - | +| `args.inputs.context` | `string` | - | +| `args.inputs.question` | `string` | - | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `options?` | [`Options`](../interfaces/Options) | - | -`Promise`<[`ObjectDetectionReturn`](../modules#objectdetectionreturn)\> +##### Returns + +`Promise`<[`QuestionAnsweringOutput`](../interfaces/QuestionAnsweringOutput)\> #### Defined in -[HfInference.ts:903](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L903) +[tasks/nlp/questionAnswering.ts:34](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/questionAnswering.ts#L34) ___ -### questionAnswer +### request + +• **request**: (`args`: { `data`: `Blob` \| `ArrayBuffer` ; `model`: `string` ; `parameters?`: `Record`<`string`, `unknown`\> } \| { `inputs`: `unknown` ; `model`: `string` ; `parameters?`: `Record`<`string`, `unknown`\> }, `options?`: [`Options`](../interfaces/Options) & { `includeCredentials?`: `boolean` }) => `Promise`<`unknown`\> -▸ **questionAnswer**(`args`, `options?`): `Promise`<[`QuestionAnswerReturn`](../interfaces/QuestionAnswerReturn)\> +#### Type declaration -Want to have a nice know-it-all bot that can answer any question?. 
Recommended model: deepset/roberta-base-squad2 +▸ (`args`, `options?`): `Promise`<`unknown`\> -#### Parameters +##### Parameters | Name | Type | | :------ | :------ | -| `args` | [`QuestionAnswerArgs`](../modules#questionanswerargs) | -| `options?` | [`Options`](../interfaces/Options) | +| `args` | { `data`: `Blob` \| `ArrayBuffer` ; `model`: `string` ; `parameters?`: `Record`<`string`, `unknown`\> } \| { `inputs`: `unknown` ; `model`: `string` ; `parameters?`: `Record`<`string`, `unknown`\> } | +| `options?` | [`Options`](../interfaces/Options) & { `includeCredentials?`: `boolean` } | -#### Returns +##### Returns -`Promise`<[`QuestionAnswerReturn`](../interfaces/QuestionAnswerReturn)\> +`Promise`<`unknown`\> #### Defined in -[HfInference.ts:671](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L671) +[tasks/custom/request.ts:7](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/custom/request.ts#L7) ___ -### request +### sentenceSimilarity -▸ **request**<`T`\>(`args`, `options?`): `Promise`<`T`\> +• **sentenceSimilarity**: (`args`: { `inputs`: `Record`<`string`, `unknown`\> \| `Record`<`string`, `unknown`\>[] ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`SentenceSimilarityOutput`](../modules#sentencesimilarityoutput)\> -#### Type parameters +#### Type declaration -| Name | -| :------ | -| `T` | +▸ (`args`, `options?`): `Promise`<[`SentenceSimilarityOutput`](../modules#sentencesimilarityoutput)\> -#### Parameters +##### Parameters -| Name | Type | -| :------ | :------ | -| `args` | [`Args`](../interfaces/Args) & { `data?`: `ArrayBuffer` \| `Blob` } | -| `options?` | [`Options`](../interfaces/Options) & { `binary?`: `boolean` ; `blob?`: `boolean` ; `includeCredentials?`: `boolean` } | +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `Record`<`string`, `unknown`\> \| `Record`<`string`, 
`unknown`\>[] | The inputs vary based on the model. For example when using sentence-transformers/paraphrase-xlm-r-multilingual-v1 the inputs will look like this: inputs: { "source_sentence": "That is a happy person", "sentences": ["That is a happy dog", "That is a very happy person", "Today is a sunny day"] } | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `options?` | [`Options`](../interfaces/Options) | - | -#### Returns +##### Returns -`Promise`<`T`\> +`Promise`<[`SentenceSimilarityOutput`](../modules#sentencesimilarityoutput)\> #### Defined in -[HfInference.ts:1030](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L1030) +[tasks/nlp/sentenceSimilarity.ts:25](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/sentenceSimilarity.ts#L25) ___ ### streamingRequest -▸ **streamingRequest**<`T`\>(`args`, `options?`): `AsyncGenerator`<`T`, `any`, `unknown`\> - -Make request that uses server-sent events and returns response as a generator +• **streamingRequest**: (`args`: { `data`: `Blob` \| `ArrayBuffer` ; `model`: `string` ; `parameters?`: `Record`<`string`, `unknown`\> } \| { `inputs`: `unknown` ; `model`: `string` ; `parameters?`: `Record`<`string`, `unknown`\> }, `options?`: [`Options`](../interfaces/Options) & { `includeCredentials?`: `boolean` }) => `AsyncGenerator`<`unknown`, `any`, `unknown`\> -#### Type parameters +#### Type declaration -| Name | -| :------ | -| `T` | +▸ (`args`, `options?`): `AsyncGenerator`<`unknown`, `any`, `unknown`\> -#### Parameters +##### Parameters | Name | Type | | :------ | :------ | -| `args` | [`Args`](../interfaces/Args) & { `data?`: `ArrayBuffer` \| `Blob` } | -| `options?` | [`Options`](../interfaces/Options) & { `binary?`: `boolean` ; `blob?`: `boolean` ; `includeCredentials?`: `boolean` } | +| `args` | { `data`: `Blob` \| `ArrayBuffer` ; `model`: `string` ; `parameters?`: `Record`<`string`, 
`unknown`\> } \| { `inputs`: `unknown` ; `model`: `string` ; `parameters?`: `Record`<`string`, `unknown`\> } | +| `options?` | [`Options`](../interfaces/Options) & { `includeCredentials?`: `boolean` } | -#### Returns +##### Returns -`AsyncGenerator`<`T`, `any`, `unknown`\> +`AsyncGenerator`<`unknown`, `any`, `unknown`\> #### Defined in -[HfInference.ts:1066](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L1066) +[tasks/custom/streamingRequest.ts:9](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/custom/streamingRequest.ts#L9) ___ ### summarization -▸ **summarization**(`args`, `options?`): `Promise`<[`SummarizationReturn`](../interfaces/SummarizationReturn)\> +• **summarization**: (`args`: { `inputs`: `string` ; `model`: `string` ; `parameters?`: { `max_length?`: `number` ; `max_time?`: `number` ; `min_length?`: `number` ; `repetition_penalty?`: `number` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`SummarizationOutput`](../interfaces/SummarizationOutput)\> -This task is well known to summarize longer text into shorter text. Be careful, some models have a maximum length of input. That means that the summary cannot handle full books for instance. Be careful when choosing your model. +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`SummarizationOutput`](../interfaces/SummarizationOutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`SummarizationArgs`](../modules#summarizationargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns - -`Promise`<[`SummarizationReturn`](../interfaces/SummarizationReturn)\> +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be summarized | +| `args.model` | `string` | The model to use. 
Can be a full URL for HF inference endpoints. | +| `args.parameters?` | `Object` | - | +| `args.parameters.max_length?` | `number` | (Default: None). Integer to define the maximum length in tokens of the output summary. | +| `args.parameters.max_time?` | `number` | (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. | +| `args.parameters.min_length?` | `number` | (Default: None). Integer to define the minimum length in tokens of the output summary. | +| `args.parameters.repetition_penalty?` | `number` | (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. | +| `args.parameters.temperature?` | `number` | (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | +| `args.parameters.top_k?` | `number` | (Default: None). Integer to define the top tokens considered within the sample operation to create new text. | +| `args.parameters.top_p?` | `number` | (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. 
| +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`SummarizationOutput`](../interfaces/SummarizationOutput)\> #### Defined in -[HfInference.ts:659](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L659) +[tasks/nlp/summarization.ts:52](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/summarization.ts#L52) ___ -### tableQuestionAnswer +### tableQuestionAnswering -▸ **tableQuestionAnswer**(`args`, `options?`): `Promise`<[`TableQuestionAnswerReturn`](../interfaces/TableQuestionAnswerReturn)\> +• **tableQuestionAnswering**: (`args`: { `inputs`: { `query`: `string` ; `table`: `Record`<`string`, `string`[]\> } ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`TableQuestionAnsweringOutput`](../interfaces/TableQuestionAnsweringOutput)\> -Don’t know SQL? Don’t want to dive into a large spreadsheet? Ask questions in plain english! Recommended model: google/tapas-base-finetuned-wtq. +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`TableQuestionAnsweringOutput`](../interfaces/TableQuestionAnsweringOutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`TableQuestionAnswerArgs`](../modules#tablequestionanswerargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `Object` | - | +| `args.inputs.query` | `string` | The query in plain text that you want to ask the table | +| `args.inputs.table` | `Record`<`string`, `string`[]\> | A table of data represented as a dict of list where entries are headers and the lists are all the values, all lists must have the same size. | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. 
| +| `options?` | [`Options`](../interfaces/Options) | - | -`Promise`<[`TableQuestionAnswerReturn`](../interfaces/TableQuestionAnswerReturn)\> +##### Returns + +`Promise`<[`TableQuestionAnsweringOutput`](../interfaces/TableQuestionAnsweringOutput)\> #### Defined in -[HfInference.ts:689](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L689) +[tasks/nlp/tableQuestionAnswering.ts:40](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tableQuestionAnswering.ts#L40) ___ ### textClassification -▸ **textClassification**(`args`, `options?`): `Promise`<[`TextClassificationReturn`](../modules#textclassificationreturn)\> +• **textClassification**: (`args`: { `inputs`: `string` ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`TextClassificationOutput`](../modules#textclassificationoutput)\> -Usually used for sentiment-analysis this will output the likelihood of classes of an input. Recommended model: distilbert-base-uncased-finetuned-sst-2-english +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`TextClassificationOutput`](../modules#textclassificationoutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`TextClassificationArgs`](../modules#textclassificationargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be classified | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. 
| +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns -`Promise`<[`TextClassificationReturn`](../modules#textclassificationreturn)\> +`Promise`<[`TextClassificationOutput`](../modules#textclassificationoutput)\> #### Defined in -[HfInference.ts:712](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L712) +[tasks/nlp/textClassification.ts:26](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textClassification.ts#L26) ___ ### textGeneration -▸ **textGeneration**(`args`, `options?`): `Promise`<[`TextGenerationReturn`](../interfaces/TextGenerationReturn)\> +• **textGeneration**: (`args`: { `inputs`: `string` ; `model`: `string` ; `parameters?`: { `do_sample?`: `boolean` ; `max_new_tokens?`: `number` ; `max_time?`: `number` ; `num_return_sequences?`: `number` ; `repetition_penalty?`: `number` ; `return_full_text?`: `boolean` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`TextGenerationOutput`](../interfaces/TextGenerationOutput)\> -Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with). +#### Type declaration -#### Parameters - -| Name | Type | -| :------ | :------ | -| `args` | [`TextGenerationArgs`](../modules#textgenerationargs) | -| `options?` | [`Options`](../interfaces/Options) | +▸ (`args`, `options?`): `Promise`<[`TextGenerationOutput`](../interfaces/TextGenerationOutput)\> -#### Returns +##### Parameters -`Promise`<[`TextGenerationReturn`](../interfaces/TextGenerationReturn)\> +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be generated from | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. 
| +| `args.parameters?` | `Object` | - | +| `args.parameters.do_sample?` | `boolean` | (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise. | +| `args.parameters.max_new_tokens?` | `number` | (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated. | +| `args.parameters.max_time?` | `number` | (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results. | +| `args.parameters.num_return_sequences?` | `number` | (Default: 1). Integer. The number of proposition you want to be returned. | +| `args.parameters.repetition_penalty?` | `number` | (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. | +| `args.parameters.return_full_text?` | `boolean` | (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting. | +| `args.parameters.temperature?` | `number` | (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | +| `args.parameters.top_k?` | `number` | (Default: None). Integer to define the top tokens considered within the sample operation to create new text. | +| `args.parameters.top_p?` | `number` | (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. 
| +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`TextGenerationOutput`](../interfaces/TextGenerationOutput)\> #### Defined in -[HfInference.ts:725](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L725) +[tasks/nlp/textGeneration.ts:60](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGeneration.ts#L60) ___ ### textGenerationStream -▸ **textGenerationStream**(`args`, `options?`): `AsyncGenerator`<[`TextGenerationStreamReturn`](../interfaces/TextGenerationStreamReturn), `any`, `unknown`\> +• **textGenerationStream**: (`args`: { `inputs`: `string` ; `model`: `string` ; `parameters?`: { `do_sample?`: `boolean` ; `max_new_tokens?`: `number` ; `max_time?`: `number` ; `num_return_sequences?`: `number` ; `repetition_penalty?`: `number` ; `return_full_text?`: `boolean` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } }, `options?`: [`Options`](../interfaces/Options)) => `AsyncGenerator`<[`TextGenerationStreamOutput`](../interfaces/TextGenerationStreamOutput), `any`, `unknown`\> -Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time +#### Type declaration -#### Parameters - -| Name | Type | -| :------ | :------ | -| `args` | [`TextGenerationArgs`](../modules#textgenerationargs) | -| `options?` | [`Options`](../interfaces/Options) | +▸ (`args`, `options?`): `AsyncGenerator`<[`TextGenerationStreamOutput`](../interfaces/TextGenerationStreamOutput), `any`, `unknown`\> -#### Returns +##### Parameters -`AsyncGenerator`<[`TextGenerationStreamReturn`](../interfaces/TextGenerationStreamReturn), `any`, `unknown`\> +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be generated from | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. 
| +| `args.parameters?` | `Object` | - | +| `args.parameters.do_sample?` | `boolean` | (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise. | +| `args.parameters.max_new_tokens?` | `number` | (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated. | +| `args.parameters.max_time?` | `number` | (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results. | +| `args.parameters.num_return_sequences?` | `number` | (Default: 1). Integer. The number of proposition you want to be returned. | +| `args.parameters.repetition_penalty?` | `number` | (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. | +| `args.parameters.return_full_text?` | `boolean` | (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting. | +| `args.parameters.temperature?` | `number` | (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | +| `args.parameters.top_k?` | `number` | (Default: None). Integer to define the top tokens considered within the sample operation to create new text. | +| `args.parameters.top_p?` | `number` | (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. 
| +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`AsyncGenerator`<[`TextGenerationStreamOutput`](../interfaces/TextGenerationStreamOutput), `any`, `unknown`\> #### Defined in -[HfInference.ts:737](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L737) +[tasks/nlp/textGenerationStream.ts:87](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L87) ___ ### textToImage -▸ **textToImage**(`args`, `options?`): `Promise`<`Blob`\> +• **textToImage**: (`args`: { `inputs`: `string` ; `model`: `string` ; `parameters?`: { `guidance_scale?`: `number` ; `height?`: `number` ; `negative_prompt?`: `string` ; `num_inference_steps?`: `number` ; `width?`: `number` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<`Blob`\> -This task reads some text input and outputs an image. -Recommended model: stabilityai/stable-diffusion-2 +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<`Blob`\> -| Name | Type | -| :------ | :------ | -| `args` | [`TextToImageArgs`](../modules#texttoimageargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | The text to generate an image from | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `args.parameters?` | `Object` | - | +| `args.parameters.guidance_scale?` | `number` | Guidance scale: Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
| +| `args.parameters.height?` | `number` | The height in pixels of the generated image | +| `args.parameters.negative_prompt?` | `string` | An optional negative prompt for the image generation | +| `args.parameters.num_inference_steps?` | `number` | The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. | +| `args.parameters.width?` | `number` | The width in pixels of the generated image | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns `Promise`<`Blob`\> #### Defined in -[HfInference.ts:951](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L951) +[tasks/cv/textToImage.ts:41](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/textToImage.ts#L41) ___ ### tokenClassification -▸ **tokenClassification**(`args`, `options?`): `Promise`<[`TokenClassificationReturn`](../modules#tokenclassificationreturn)\> +• **tokenClassification**: (`args`: { `inputs`: `string` ; `model`: `string` ; `parameters?`: { `aggregation_strategy?`: ``"none"`` \| ``"simple"`` \| ``"first"`` \| ``"average"`` \| ``"max"`` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`TokenClassificationOutput`](../modules#tokenclassificationoutput)\> -Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. 
Recommended model: dbmdz/bert-large-cased-finetuned-conll03-english +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`TokenClassificationOutput`](../modules#tokenclassificationoutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`TokenClassificationArgs`](../modules#tokenclassificationargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be classified | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `args.parameters?` | `Object` | - | +| `args.parameters.aggregation_strategy?` | ``"none"`` \| ``"simple"`` \| ``"first"`` \| ``"average"`` \| ``"max"`` | (Default: simple). There are several aggregation strategies: none: Every token gets classified without further aggregation. simple: Entities are grouped according to the default schema (B-, I- tags get merged when the tag is similar). first: Same as the simple strategy except words cannot end up with different tags. Words will use the tag of the first token when there is ambiguity. average: Same as the simple strategy except words cannot end up with different tags. Scores are averaged across tokens and then the maximum label is applied. max: Same as the simple strategy except words cannot end up with different tags. Word entity will be the token with the maximum score. 
| +| `options?` | [`Options`](../interfaces/Options) | - | -`Promise`<[`TokenClassificationReturn`](../modules#tokenclassificationreturn)\> +##### Returns + +`Promise`<[`TokenClassificationOutput`](../modules#tokenclassificationoutput)\> #### Defined in -[HfInference.ts:747](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L747) +[tasks/nlp/tokenClassification.ts:57](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tokenClassification.ts#L57) ___ ### translation -▸ **translation**(`args`, `options?`): `Promise`<[`TranslationReturn`](../interfaces/TranslationReturn)\> +• **translation**: (`args`: { `inputs`: `string` ; `model`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`TranslationOutput`](../interfaces/TranslationOutput)\> -This task is well known to translate text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en. +#### Type declaration -#### Parameters +▸ (`args`, `options?`): `Promise`<[`TranslationOutput`](../interfaces/TranslationOutput)\> -| Name | Type | -| :------ | :------ | -| `args` | [`TranslationArgs`](../modules#translationargs) | -| `options?` | [`Options`](../interfaces/Options) | +##### Parameters -#### Returns +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be translated | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. 
| +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns -`Promise`<[`TranslationReturn`](../interfaces/TranslationReturn)\> +`Promise`<[`TranslationOutput`](../interfaces/TranslationOutput)\> #### Defined in -[HfInference.ts:773](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L773) +[tasks/nlp/translation.ts:22](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/translation.ts#L22) ___ ### zeroShotClassification -▸ **zeroShotClassification**(`args`, `options?`): `Promise`<[`ZeroShotClassificationReturn`](../modules#zeroshotclassificationreturn)\> +• **zeroShotClassification**: (`args`: { `inputs`: `string` \| `string`[] ; `model`: `string` ; `parameters`: { `candidate_labels`: `string`[] ; `multi_label?`: `boolean` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ZeroShotClassificationOutput`](../modules#zeroshotclassificationoutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`ZeroShotClassificationOutput`](../modules#zeroshotclassificationoutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` \| `string`[] | a string or list of strings | +| `args.model` | `string` | The model to use. Can be a full URL for HF inference endpoints. | +| `args.parameters` | `Object` | - | +| `args.parameters.candidate_labels` | `string`[] | a list of strings that are potential classes for inputs. (max 10 candidate_labels, for more, simply run multiple requests, results are going to be misleading if using too many candidate_labels anyway. If you want to keep the exact same, you can simply run multi_label=True and do the scaling on your end. 
| +| `args.parameters.multi_label?` | `boolean` | (Default: false) Boolean that is set to True if classes can overlap | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`ZeroShotClassificationOutput`](../modules#zeroshotclassificationoutput)\> + +#### Defined in + +[tasks/nlp/zeroShotClassification.ts:34](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/zeroShotClassification.ts#L34) + +## Methods + +### endpoint + +▸ **endpoint**(`endpointUrl`): [`HfInferenceEndpoint`](HfInferenceEndpoint) -This task is super useful to try out classification with zero code, you simply pass a sentence/paragraph and the possible labels for that sentence, and you get a result. Recommended model: facebook/bart-large-mnli. +Returns copy of HfInference tied to a specified endpoint. #### Parameters | Name | Type | | :------ | :------ | -| `args` | [`ZeroShotClassificationArgs`](../modules#zeroshotclassificationargs) | -| `options?` | [`Options`](../interfaces/Options) | +| `endpointUrl` | `string` | #### Returns -`Promise`<[`ZeroShotClassificationReturn`](../modules#zeroshotclassificationreturn)\> +[`HfInferenceEndpoint`](HfInferenceEndpoint) #### Defined in -[HfInference.ts:785](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L785) +[HfInference.ts:42](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L42) diff --git a/docs/inference/classes/HfInferenceEndpoint.md b/docs/inference/classes/HfInferenceEndpoint.md new file mode 100644 index 000000000..bb20002c3 --- /dev/null +++ b/docs/inference/classes/HfInferenceEndpoint.md @@ -0,0 +1,649 @@ +# Class: HfInferenceEndpoint + +## Hierarchy + +- `TaskWithNoAccessTokenNoModel` + + ↳ **`HfInferenceEndpoint`** + +## Constructors + +### constructor + +• **new HfInferenceEndpoint**(`endpointUrl`, `accessToken?`, `defaultOptions?`) + +#### Parameters + +| Name | Type | Default 
value | +| :------ | :------ | :------ | +| `endpointUrl` | `string` | `undefined` | +| `accessToken` | `string` | `""` | +| `defaultOptions` | [`Options`](../interfaces/Options) | `{}` | + +#### Defined in + +[HfInference.ts:48](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L48) + +## Properties + +### audioClassification + +• **audioClassification**: (`args`: { `data`: `Blob` \| `ArrayBuffer` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`AudioClassificationReturn`](../modules#audioclassificationreturn)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`AudioClassificationReturn`](../modules#audioclassificationreturn)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary audio data | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`AudioClassificationReturn`](../modules#audioclassificationreturn)\> + +#### Defined in + +[tasks/audio/audioClassification.ts:30](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/audioClassification.ts#L30) + +___ + +### automaticSpeechRecognition + +• **automaticSpeechRecognition**: (`args`: { `data`: `Blob` \| `ArrayBuffer` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`AutomaticSpeechRecognitionOutput`](../interfaces/AutomaticSpeechRecognitionOutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`AutomaticSpeechRecognitionOutput`](../interfaces/AutomaticSpeechRecognitionOutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary audio data | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`AutomaticSpeechRecognitionOutput`](../interfaces/AutomaticSpeechRecognitionOutput)\> + 
+#### Defined in + +[tasks/audio/automaticSpeechRecognition.ts:23](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/automaticSpeechRecognition.ts#L23) + +___ + +### conversational + +• **conversational**: (`args`: { `inputs`: { `generated_responses?`: `string`[] ; `past_user_inputs?`: `string`[] ; `text`: `string` } ; `parameters?`: { `max_length?`: `number` ; `max_time?`: `number` ; `min_length?`: `number` ; `repetition_penalty?`: `number` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ConversationalOutput`](../interfaces/ConversationalOutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`ConversationalOutput`](../interfaces/ConversationalOutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `Object` | - | +| `args.inputs.generated_responses?` | `string`[] | A list of strings corresponding to the earlier replies from the model. | +| `args.inputs.past_user_inputs?` | `string`[] | A list of strings corresponding to the earlier replies from the user. Should be of the same length of generated_responses. | +| `args.inputs.text` | `string` | The last input from the user in the conversation. | +| `args.parameters?` | `Object` | - | +| `args.parameters.max_length?` | `number` | (Default: None). Integer to define the maximum length in tokens of the output summary. | +| `args.parameters.max_time?` | `number` | (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. | +| `args.parameters.min_length?` | `number` | (Default: None). Integer to define the minimum length in tokens of the output summary. | +| `args.parameters.repetition_penalty?` | `number` | (Default: None). Float (0.0-100.0). 
The more a token is used within generation the more it is penalized to not be picked in successive generation passes. | +| `args.parameters.temperature?` | `number` | (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | +| `args.parameters.top_k?` | `number` | (Default: None). Integer to define the top tokens considered within the sample operation to create new text. | +| `args.parameters.top_p?` | `number` | (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`ConversationalOutput`](../interfaces/ConversationalOutput)\> + +#### Defined in + +[tasks/nlp/conversational.ts:65](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/conversational.ts#L65) + +___ + +### featureExtraction + +• **featureExtraction**: (`args`: { `inputs`: `string` \| `string`[] }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`FeatureExtractionOutput`](../modules#featureextractionoutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`FeatureExtractionOutput`](../modules#featureextractionoutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` \| `string`[] | The inputs is a string or a list of strings to get the features from. 
inputs: "That is a happy person", | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`FeatureExtractionOutput`](../modules#featureextractionoutput)\> + +#### Defined in + +[tasks/nlp/featureExtraction.ts:23](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/featureExtraction.ts#L23) + +___ + +### fillMask + +• **fillMask**: (`args`: { `inputs`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`FillMaskOutput`](../modules#fillmaskoutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`FillMaskOutput`](../modules#fillmaskoutput)\> + +##### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | `Object` | +| `args.inputs` | `string` | +| `options?` | [`Options`](../interfaces/Options) | + +##### Returns + +`Promise`<[`FillMaskOutput`](../modules#fillmaskoutput)\> + +#### Defined in + +[tasks/nlp/fillMask.ts:31](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/fillMask.ts#L31) + +___ + +### imageClassification + +• **imageClassification**: (`args`: { `data`: `Blob` \| `ArrayBuffer` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ImageClassificationOutput`](../modules#imageclassificationoutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`ImageClassificationOutput`](../modules#imageclassificationoutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary image data | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`ImageClassificationOutput`](../modules#imageclassificationoutput)\> + +#### Defined in + +[tasks/cv/imageClassification.ts:29](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageClassification.ts#L29) + +___ + +### imageSegmentation + +• **imageSegmentation**: 
(`args`: { `data`: `Blob` \| `ArrayBuffer` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ImageSegmentationOutput`](../modules#imagesegmentationoutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`ImageSegmentationOutput`](../modules#imagesegmentationoutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary image data | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`ImageSegmentationOutput`](../modules#imagesegmentationoutput)\> + +#### Defined in + +[tasks/cv/imageSegmentation.ts:33](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageSegmentation.ts#L33) + +___ + +### imageToText + +• **imageToText**: (`args`: { `data`: `Blob` \| `ArrayBuffer` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ImageToTextOutput`](../interfaces/ImageToTextOutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`ImageToTextOutput`](../interfaces/ImageToTextOutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary image data | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`ImageToTextOutput`](../interfaces/ImageToTextOutput)\> + +#### Defined in + +[tasks/cv/imageToText.ts:22](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageToText.ts#L22) + +___ + +### objectDetection + +• **objectDetection**: (`args`: { `data`: `Blob` \| `ArrayBuffer` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ObjectDetectionOutput`](../modules#objectdetectionoutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`ObjectDetectionOutput`](../modules#objectdetectionoutput)\> + +##### Parameters + +| Name | Type | 
Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.data` | `Blob` \| `ArrayBuffer` | Binary image data | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`ObjectDetectionOutput`](../modules#objectdetectionoutput)\> + +#### Defined in + +[tasks/cv/objectDetection.ts:39](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/objectDetection.ts#L39) + +___ + +### questionAnswering + +• **questionAnswering**: (`args`: { `inputs`: { `context`: `string` ; `question`: `string` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`QuestionAnsweringOutput`](../interfaces/QuestionAnsweringOutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`QuestionAnsweringOutput`](../interfaces/QuestionAnsweringOutput)\> + +##### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | `Object` | +| `args.inputs` | `Object` | +| `args.inputs.context` | `string` | +| `args.inputs.question` | `string` | +| `options?` | [`Options`](../interfaces/Options) | + +##### Returns + +`Promise`<[`QuestionAnsweringOutput`](../interfaces/QuestionAnsweringOutput)\> + +#### Defined in + +[tasks/nlp/questionAnswering.ts:34](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/questionAnswering.ts#L34) + +___ + +### request + +• **request**: (`args`: { `data`: `Blob` \| `ArrayBuffer` ; `parameters?`: `Record`<`string`, `unknown`\> } \| { `inputs`: `unknown` ; `parameters?`: `Record`<`string`, `unknown`\> }, `options?`: [`Options`](../interfaces/Options) & { `includeCredentials?`: `boolean` }) => `Promise`<`unknown`\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<`unknown`\> + +##### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | { `data`: `Blob` \| `ArrayBuffer` ; `parameters?`: `Record`<`string`, `unknown`\> } \| { `inputs`: `unknown` ; `parameters?`: `Record`<`string`, `unknown`\> } | +| 
`options?` | [`Options`](../interfaces/Options) & { `includeCredentials?`: `boolean` } | + +##### Returns + +`Promise`<`unknown`\> + +#### Defined in + +[tasks/custom/request.ts:7](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/custom/request.ts#L7) + +___ + +### sentenceSimilarity + +• **sentenceSimilarity**: (`args`: { `inputs`: `Record`<`string`, `unknown`\> \| `Record`<`string`, `unknown`\>[] }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`SentenceSimilarityOutput`](../modules#sentencesimilarityoutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`SentenceSimilarityOutput`](../modules#sentencesimilarityoutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `Record`<`string`, `unknown`\> \| `Record`<`string`, `unknown`\>[] | The inputs vary based on the model. For example when using sentence-transformers/paraphrase-xlm-r-multilingual-v1 the inputs will look like this: inputs: { "source_sentence": "That is a happy person", "sentences": ["That is a happy dog", "That is a very happy person", "Today is a sunny day"] } | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`SentenceSimilarityOutput`](../modules#sentencesimilarityoutput)\> + +#### Defined in + +[tasks/nlp/sentenceSimilarity.ts:25](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/sentenceSimilarity.ts#L25) + +___ + +### streamingRequest + +• **streamingRequest**: (`args`: { `data`: `Blob` \| `ArrayBuffer` ; `parameters?`: `Record`<`string`, `unknown`\> } \| { `inputs`: `unknown` ; `parameters?`: `Record`<`string`, `unknown`\> }, `options?`: [`Options`](../interfaces/Options) & { `includeCredentials?`: `boolean` }) => `AsyncGenerator`<`unknown`, `any`, `unknown`\> + +#### Type declaration + +▸ (`args`, `options?`): `AsyncGenerator`<`unknown`, `any`, `unknown`\> + +##### 
Parameters + +| Name | Type | +| :------ | :------ | +| `args` | { `data`: `Blob` \| `ArrayBuffer` ; `parameters?`: `Record`<`string`, `unknown`\> } \| { `inputs`: `unknown` ; `parameters?`: `Record`<`string`, `unknown`\> } | +| `options?` | [`Options`](../interfaces/Options) & { `includeCredentials?`: `boolean` } | + +##### Returns + +`AsyncGenerator`<`unknown`, `any`, `unknown`\> + +#### Defined in + +[tasks/custom/streamingRequest.ts:9](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/custom/streamingRequest.ts#L9) + +___ + +### summarization + +• **summarization**: (`args`: { `inputs`: `string` ; `parameters?`: { `max_length?`: `number` ; `max_time?`: `number` ; `min_length?`: `number` ; `repetition_penalty?`: `number` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`SummarizationOutput`](../interfaces/SummarizationOutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`SummarizationOutput`](../interfaces/SummarizationOutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be summarized | +| `args.parameters?` | `Object` | - | +| `args.parameters.max_length?` | `number` | (Default: None). Integer to define the maximum length in tokens of the output summary. | +| `args.parameters.max_time?` | `number` | (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. | +| `args.parameters.min_length?` | `number` | (Default: None). Integer to define the minimum length in tokens of the output summary. | +| `args.parameters.repetition_penalty?` | `number` | (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. 
| +| `args.parameters.temperature?` | `number` | (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | +| `args.parameters.top_k?` | `number` | (Default: None). Integer to define the top tokens considered within the sample operation to create new text. | +| `args.parameters.top_p?` | `number` | (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`SummarizationOutput`](../interfaces/SummarizationOutput)\> + +#### Defined in + +[tasks/nlp/summarization.ts:52](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/summarization.ts#L52) + +___ + +### tableQuestionAnswering + +• **tableQuestionAnswering**: (`args`: { `inputs`: { `query`: `string` ; `table`: `Record`<`string`, `string`[]\> } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`TableQuestionAnsweringOutput`](../interfaces/TableQuestionAnsweringOutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`TableQuestionAnsweringOutput`](../interfaces/TableQuestionAnsweringOutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `Object` | - | +| `args.inputs.query` | `string` | The query in plain text that you want to ask the table | +| `args.inputs.table` | `Record`<`string`, `string`[]\> | A table of data represented as a dict of list where entries are headers and the lists are all the values, all lists must have the same size. 
| +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`TableQuestionAnsweringOutput`](../interfaces/TableQuestionAnsweringOutput)\> + +#### Defined in + +[tasks/nlp/tableQuestionAnswering.ts:40](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tableQuestionAnswering.ts#L40) + +___ + +### textClassification + +• **textClassification**: (`args`: { `inputs`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`TextClassificationOutput`](../modules#textclassificationoutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`TextClassificationOutput`](../modules#textclassificationoutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be classified | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`TextClassificationOutput`](../modules#textclassificationoutput)\> + +#### Defined in + +[tasks/nlp/textClassification.ts:26](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textClassification.ts#L26) + +___ + +### textGeneration + +• **textGeneration**: (`args`: { `inputs`: `string` ; `parameters?`: { `do_sample?`: `boolean` ; `max_new_tokens?`: `number` ; `max_time?`: `number` ; `num_return_sequences?`: `number` ; `repetition_penalty?`: `number` ; `return_full_text?`: `boolean` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`TextGenerationOutput`](../interfaces/TextGenerationOutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`TextGenerationOutput`](../interfaces/TextGenerationOutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be generated from | +| 
`args.parameters?` | `Object` | - | +| `args.parameters.do_sample?` | `boolean` | (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise. | +| `args.parameters.max_new_tokens?` | `number` | (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated. | +| `args.parameters.max_time?` | `number` | (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results. | +| `args.parameters.num_return_sequences?` | `number` | (Default: 1). Integer. The number of proposition you want to be returned. | +| `args.parameters.repetition_penalty?` | `number` | (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. | +| `args.parameters.return_full_text?` | `boolean` | (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting. | +| `args.parameters.temperature?` | `number` | (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | +| `args.parameters.top_k?` | `number` | (Default: None). Integer to define the top tokens considered within the sample operation to create new text. | +| `args.parameters.top_p?` | `number` | (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. 
| +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`TextGenerationOutput`](../interfaces/TextGenerationOutput)\> + +#### Defined in + +[tasks/nlp/textGeneration.ts:60](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGeneration.ts#L60) + +___ + +### textGenerationStream + +• **textGenerationStream**: (`args`: { `inputs`: `string` ; `parameters?`: { `do_sample?`: `boolean` ; `max_new_tokens?`: `number` ; `max_time?`: `number` ; `num_return_sequences?`: `number` ; `repetition_penalty?`: `number` ; `return_full_text?`: `boolean` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } }, `options?`: [`Options`](../interfaces/Options)) => `AsyncGenerator`<[`TextGenerationStreamOutput`](../interfaces/TextGenerationStreamOutput), `any`, `unknown`\> + +#### Type declaration + +▸ (`args`, `options?`): `AsyncGenerator`<[`TextGenerationStreamOutput`](../interfaces/TextGenerationStreamOutput), `any`, `unknown`\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be generated from | +| `args.parameters?` | `Object` | - | +| `args.parameters.do_sample?` | `boolean` | (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise. | +| `args.parameters.max_new_tokens?` | `number` | (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated. | +| `args.parameters.max_time?` | `number` | (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results. 
| +| `args.parameters.num_return_sequences?` | `number` | (Default: 1). Integer. The number of proposition you want to be returned. | +| `args.parameters.repetition_penalty?` | `number` | (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. | +| `args.parameters.return_full_text?` | `boolean` | (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting. | +| `args.parameters.temperature?` | `number` | (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | +| `args.parameters.top_k?` | `number` | (Default: None). Integer to define the top tokens considered within the sample operation to create new text. | +| `args.parameters.top_p?` | `number` | (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. 
| +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`AsyncGenerator`<[`TextGenerationStreamOutput`](../interfaces/TextGenerationStreamOutput), `any`, `unknown`\> + +#### Defined in + +[tasks/nlp/textGenerationStream.ts:87](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L87) + +___ + +### textToImage + +• **textToImage**: (`args`: { `inputs`: `string` ; `parameters?`: { `guidance_scale?`: `number` ; `height?`: `number` ; `negative_prompt?`: `string` ; `num_inference_steps?`: `number` ; `width?`: `number` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<`Blob`\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<`Blob`\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | The text to generate an image from | +| `args.parameters?` | `Object` | - | +| `args.parameters.guidance_scale?` | `number` | Guidance scale: Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. | +| `args.parameters.height?` | `number` | The height in pixels of the generated image | +| `args.parameters.negative_prompt?` | `string` | An optional negative prompt for the image generation | +| `args.parameters.num_inference_steps?` | `number` | The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
| +| `args.parameters.width?` | `number` | The width in pixels of the generated image | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<`Blob`\> + +#### Defined in + +[tasks/cv/textToImage.ts:41](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/textToImage.ts#L41) + +___ + +### tokenClassification + +• **tokenClassification**: (`args`: { `inputs`: `string` ; `parameters?`: { `aggregation_strategy?`: ``"none"`` \| ``"simple"`` \| ``"first"`` \| ``"average"`` \| ``"max"`` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`TokenClassificationOutput`](../modules#tokenclassificationoutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`TokenClassificationOutput`](../modules#tokenclassificationoutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be classified | +| `args.parameters?` | `Object` | - | +| `args.parameters.aggregation_strategy?` | ``"none"`` \| ``"simple"`` \| ``"first"`` \| ``"average"`` \| ``"max"`` | (Default: simple). There are several aggregation strategies: none: Every token gets classified without further aggregation. simple: Entities are grouped according to the default schema (B-, I- tags get merged when the tag is similar). first: Same as the simple strategy except words cannot end up with different tags. Words will use the tag of the first token when there is ambiguity. average: Same as the simple strategy except words cannot end up with different tags. Scores are averaged across tokens and then the maximum label is applied. max: Same as the simple strategy except words cannot end up with different tags. Word entity will be the token with the maximum score. 
| +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`TokenClassificationOutput`](../modules#tokenclassificationoutput)\> + +#### Defined in + +[tasks/nlp/tokenClassification.ts:57](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tokenClassification.ts#L57) + +___ + +### translation + +• **translation**: (`args`: { `inputs`: `string` }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`TranslationOutput`](../interfaces/TranslationOutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`TranslationOutput`](../interfaces/TranslationOutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` | A string to be translated | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`TranslationOutput`](../interfaces/TranslationOutput)\> + +#### Defined in + +[tasks/nlp/translation.ts:22](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/translation.ts#L22) + +___ + +### zeroShotClassification + +• **zeroShotClassification**: (`args`: { `inputs`: `string` \| `string`[] ; `parameters`: { `candidate_labels`: `string`[] ; `multi_label?`: `boolean` } }, `options?`: [`Options`](../interfaces/Options)) => `Promise`<[`ZeroShotClassificationOutput`](../modules#zeroshotclassificationoutput)\> + +#### Type declaration + +▸ (`args`, `options?`): `Promise`<[`ZeroShotClassificationOutput`](../modules#zeroshotclassificationoutput)\> + +##### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `args` | `Object` | - | +| `args.inputs` | `string` \| `string`[] | a string or list of strings | +| `args.parameters` | `Object` | - | +| `args.parameters.candidate_labels` | `string`[] | a list of strings that are potential classes for inputs. 
(max 10 candidate_labels, for more, simply run multiple requests, results are going to be misleading if using too many candidate_labels anyway. If you want to keep the exact same, you can simply run multi_label=True and do the scaling on your end. | +| `args.parameters.multi_label?` | `boolean` | (Default: false) Boolean that is set to True if classes can overlap | +| `options?` | [`Options`](../interfaces/Options) | - | + +##### Returns + +`Promise`<[`ZeroShotClassificationOutput`](../modules#zeroshotclassificationoutput)\> + +#### Defined in + +[tasks/nlp/zeroShotClassification.ts:34](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/zeroShotClassification.ts#L34) diff --git a/docs/inference/enums/TextGenerationStreamFinishReason.md b/docs/inference/enums/TextGenerationStreamFinishReason.md deleted file mode 100644 index defe7afa4..000000000 --- a/docs/inference/enums/TextGenerationStreamFinishReason.md +++ /dev/null @@ -1,37 +0,0 @@ -# Enumeration: TextGenerationStreamFinishReason - -## Enumeration Members - -### EndOfSequenceToken - -• **EndOfSequenceToken** = ``"eos_token"`` - -the model generated its end of sequence token - -#### Defined in - -[HfInference.ts:275](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L275) - -___ - -### Length - -• **Length** = ``"length"`` - -number of generated tokens == `max_new_tokens` - -#### Defined in - -[HfInference.ts:273](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L273) - -___ - -### StopSequence - -• **StopSequence** = ``"stop_sequence"`` - -the model generated a text included in `stop_sequences` - -#### Defined in - -[HfInference.ts:277](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L277) diff --git a/docs/inference/interfaces/Args.md b/docs/inference/interfaces/Args.md deleted file mode 100644 index eaf761318..000000000 --- 
a/docs/inference/interfaces/Args.md +++ /dev/null @@ -1,11 +0,0 @@ -# Interface: Args - -## Properties - -### model - -• **model**: `string` - -#### Defined in - -[HfInference.ts:32](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L32) diff --git a/docs/inference/interfaces/AudioClassificationOutputValue.md b/docs/inference/interfaces/AudioClassificationOutputValue.md new file mode 100644 index 000000000..4f5794e30 --- /dev/null +++ b/docs/inference/interfaces/AudioClassificationOutputValue.md @@ -0,0 +1,25 @@ +# Interface: AudioClassificationOutputValue + +## Properties + +### label + +• **label**: `string` + +The label for the class (model specific) + +#### Defined in + +[tasks/audio/audioClassification.ts:16](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/audioClassification.ts#L16) + +___ + +### score + +• **score**: `number` + +A float that represents how likely it is that the audio file belongs to this class. + +#### Defined in + +[tasks/audio/audioClassification.ts:21](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/audioClassification.ts#L21) diff --git a/docs/inference/interfaces/AudioClassificationReturnValue.md b/docs/inference/interfaces/AudioClassificationReturnValue.md deleted file mode 100644 index e3d1477d0..000000000 --- a/docs/inference/interfaces/AudioClassificationReturnValue.md +++ /dev/null @@ -1,25 +0,0 @@ -# Interface: AudioClassificationReturnValue - -## Properties - -### label - -• **label**: `string` - -The label for the class (model specific) - -#### Defined in - -[HfInference.ts:569](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L569) - -___ - -### score - -• **score**: `number` - -A float that represents how likely it is that the audio file belongs to this class. 
- -#### Defined in - -[HfInference.ts:574](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L574) diff --git a/docs/inference/interfaces/AutomaticSpeechRecognitionOutput.md b/docs/inference/interfaces/AutomaticSpeechRecognitionOutput.md new file mode 100644 index 000000000..a4aa8f92d --- /dev/null +++ b/docs/inference/interfaces/AutomaticSpeechRecognitionOutput.md @@ -0,0 +1,13 @@ +# Interface: AutomaticSpeechRecognitionOutput + +## Properties + +### text + +• **text**: `string` + +The text that was recognized from the audio + +#### Defined in + +[tasks/audio/automaticSpeechRecognition.ts:16](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/automaticSpeechRecognition.ts#L16) diff --git a/docs/inference/interfaces/AutomaticSpeechRecognitionReturn.md b/docs/inference/interfaces/AutomaticSpeechRecognitionReturn.md deleted file mode 100644 index 21b37258b..000000000 --- a/docs/inference/interfaces/AutomaticSpeechRecognitionReturn.md +++ /dev/null @@ -1,13 +0,0 @@ -# Interface: AutomaticSpeechRecognitionReturn - -## Properties - -### text - -• **text**: `string` - -The text that was recognized from the audio - -#### Defined in - -[HfInference.ts:555](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L555) diff --git a/docs/inference/interfaces/BaseArgs.md b/docs/inference/interfaces/BaseArgs.md new file mode 100644 index 000000000..a119ab639 --- /dev/null +++ b/docs/inference/interfaces/BaseArgs.md @@ -0,0 +1,27 @@ +# Interface: BaseArgs + +## Properties + +### accessToken + +• `Optional` **accessToken**: `string` + +The access token to use. Without it, you'll get rate-limited quickly. + +Can be created for free in hf.co/settings/token + +#### Defined in + +[types.ts:31](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/types.ts#L31) + +___ + +### model + +• **model**: `string` + +The model to use. 
Can be a full URL for HF inference endpoints. + +#### Defined in + +[types.ts:35](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/types.ts#L35) diff --git a/docs/inference/interfaces/ConversationalOutput.md b/docs/inference/interfaces/ConversationalOutput.md new file mode 100644 index 000000000..3f0108179 --- /dev/null +++ b/docs/inference/interfaces/ConversationalOutput.md @@ -0,0 +1,38 @@ +# Interface: ConversationalOutput + +## Properties + +### conversation + +• **conversation**: `Object` + +#### Type declaration + +| Name | Type | +| :------ | :------ | +| `generated_responses` | `string`[] | +| `past_user_inputs` | `string`[] | + +#### Defined in + +[tasks/nlp/conversational.ts:53](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/conversational.ts#L53) + +___ + +### generated\_text + +• **generated\_text**: `string` + +#### Defined in + +[tasks/nlp/conversational.ts:57](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/conversational.ts#L57) + +___ + +### warnings + +• **warnings**: `string`[] + +#### Defined in + +[tasks/nlp/conversational.ts:58](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/conversational.ts#L58) diff --git a/docs/inference/interfaces/ConversationalReturn.md b/docs/inference/interfaces/ConversationalReturn.md deleted file mode 100644 index 447dbe34e..000000000 --- a/docs/inference/interfaces/ConversationalReturn.md +++ /dev/null @@ -1,38 +0,0 @@ -# Interface: ConversationalReturn - -## Properties - -### conversation - -• **conversation**: `Object` - -#### Type declaration - -| Name | Type | -| :------ | :------ | -| `generated_responses` | `string`[] | -| `past_user_inputs` | `string`[] | - -#### Defined in - -[HfInference.ts:445](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L445) - -___ - -### generated\_text - -• **generated\_text**: 
`string` - -#### Defined in - -[HfInference.ts:449](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L449) - -___ - -### warnings - -• **warnings**: `string`[] - -#### Defined in - -[HfInference.ts:450](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L450) diff --git a/docs/inference/interfaces/ImageClassificationOutputValue.md b/docs/inference/interfaces/ImageClassificationOutputValue.md new file mode 100644 index 000000000..7180500e3 --- /dev/null +++ b/docs/inference/interfaces/ImageClassificationOutputValue.md @@ -0,0 +1,25 @@ +# Interface: ImageClassificationOutputValue + +## Properties + +### label + +• **label**: `string` + +A float that represents how likely it is that the image file belongs to this class. + +#### Defined in + +[tasks/cv/imageClassification.ts:16](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageClassification.ts#L16) + +___ + +### score + +• **score**: `number` + +The label for the class (model specific) + +#### Defined in + +[tasks/cv/imageClassification.ts:20](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageClassification.ts#L20) diff --git a/docs/inference/interfaces/ImageClassificationReturnValue.md b/docs/inference/interfaces/ImageClassificationReturnValue.md deleted file mode 100644 index b794aee8c..000000000 --- a/docs/inference/interfaces/ImageClassificationReturnValue.md +++ /dev/null @@ -1,25 +0,0 @@ -# Interface: ImageClassificationReturnValue - -## Properties - -### label - -• **label**: `string` - -A float that represents how likely it is that the image file belongs to this class. 
- -#### Defined in - -[HfInference.ts:481](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L481) - -___ - -### score - -• **score**: `number` - -The label for the class (model specific) - -#### Defined in - -[HfInference.ts:485](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L485) diff --git a/docs/inference/interfaces/ImageSegmentationOutputValue.md b/docs/inference/interfaces/ImageSegmentationOutputValue.md new file mode 100644 index 000000000..0da033900 --- /dev/null +++ b/docs/inference/interfaces/ImageSegmentationOutputValue.md @@ -0,0 +1,37 @@ +# Interface: ImageSegmentationOutputValue + +## Properties + +### label + +• **label**: `string` + +The label for the class (model specific) of a segment. + +#### Defined in + +[tasks/cv/imageSegmentation.ts:16](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageSegmentation.ts#L16) + +___ + +### mask + +• **mask**: `string` + +A str (base64 str of a single channel black-and-white img) representing the mask of a segment. + +#### Defined in + +[tasks/cv/imageSegmentation.ts:20](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageSegmentation.ts#L20) + +___ + +### score + +• **score**: `number` + +A float that represents how likely it is that the detected object belongs to the given class. 
+ +#### Defined in + +[tasks/cv/imageSegmentation.ts:24](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageSegmentation.ts#L24) diff --git a/docs/inference/interfaces/ImageSegmentationReturnValue.md b/docs/inference/interfaces/ImageSegmentationReturnValue.md deleted file mode 100644 index 2f2901c7a..000000000 --- a/docs/inference/interfaces/ImageSegmentationReturnValue.md +++ /dev/null @@ -1,37 +0,0 @@ -# Interface: ImageSegmentationReturnValue - -## Properties - -### label - -• **label**: `string` - -The label for the class (model specific) of a segment. - -#### Defined in - -[HfInference.ts:531](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L531) - -___ - -### mask - -• **mask**: `string` - -A str (base64 str of a single channel black-and-white img) representing the mask of a segment. - -#### Defined in - -[HfInference.ts:535](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L535) - -___ - -### score - -• **score**: `number` - -A float that represents how likely it is that the detected object belongs to the given class. 
- -#### Defined in - -[HfInference.ts:539](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L539) diff --git a/docs/inference/interfaces/ImageToTextOutput.md b/docs/inference/interfaces/ImageToTextOutput.md new file mode 100644 index 000000000..465cb51bb --- /dev/null +++ b/docs/inference/interfaces/ImageToTextOutput.md @@ -0,0 +1,13 @@ +# Interface: ImageToTextOutput + +## Properties + +### generated\_text + +• **generated\_text**: `string` + +The generated caption + +#### Defined in + +[tasks/cv/imageToText.ts:16](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageToText.ts#L16) diff --git a/docs/inference/interfaces/ImageToTextReturn.md b/docs/inference/interfaces/ImageToTextReturn.md deleted file mode 100644 index 93bc33498..000000000 --- a/docs/inference/interfaces/ImageToTextReturn.md +++ /dev/null @@ -1,13 +0,0 @@ -# Interface: ImageToTextReturn - -## Properties - -### generated\_text - -• **generated\_text**: `string` - -The generated caption - -#### Defined in - -[HfInference.ts:622](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L622) diff --git a/docs/inference/interfaces/ObjectDetectionReturnValue.md b/docs/inference/interfaces/ObjectDetectionOutputValue.md similarity index 55% rename from docs/inference/interfaces/ObjectDetectionReturnValue.md rename to docs/inference/interfaces/ObjectDetectionOutputValue.md index 196be1744..b5bc05d76 100644 --- a/docs/inference/interfaces/ObjectDetectionReturnValue.md +++ b/docs/inference/interfaces/ObjectDetectionOutputValue.md @@ -1,4 +1,4 @@ -# Interface: ObjectDetectionReturnValue +# Interface: ObjectDetectionOutputValue ## Properties @@ -19,7 +19,7 @@ A dict (with keys [xmin,ymin,xmax,ymax]) representing the bounding box of a dete #### Defined in -[HfInference.ts:501](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L501) 
+[tasks/cv/objectDetection.ts:16](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/objectDetection.ts#L16) ___ @@ -31,7 +31,7 @@ The label for the class (model specific) of a detected object. #### Defined in -[HfInference.ts:510](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L510) +[tasks/cv/objectDetection.ts:25](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/objectDetection.ts#L25) ___ @@ -43,4 +43,4 @@ A float that represents how likely it is that the detected object belongs to the #### Defined in -[HfInference.ts:515](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L515) +[tasks/cv/objectDetection.ts:30](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/objectDetection.ts#L30) diff --git a/docs/inference/interfaces/Options.md b/docs/inference/interfaces/Options.md index fd735393d..fc1a0ca0f 100644 --- a/docs/inference/interfaces/Options.md +++ b/docs/inference/interfaces/Options.md @@ -10,7 +10,7 @@ #### Defined in -[HfInference.ts:19](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L19) +[types.ts:13](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/types.ts#L13) ___ @@ -22,7 +22,7 @@ ___ #### Defined in -[HfInference.ts:11](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L11) +[types.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/types.ts#L5) ___ @@ -34,7 +34,7 @@ ___ #### Defined in -[HfInference.ts:15](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L15) +[types.ts:9](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/types.ts#L9) ___ @@ -46,7 +46,7 @@ ___ #### Defined in 
-[HfInference.ts:23](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L23) +[types.ts:17](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/types.ts#L17) ___ @@ -58,4 +58,4 @@ ___ #### Defined in -[HfInference.ts:28](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L28) +[types.ts:22](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/types.ts#L22) diff --git a/docs/inference/interfaces/QuestionAnswerReturn.md b/docs/inference/interfaces/QuestionAnswerReturn.md deleted file mode 100644 index 5326b3ccf..000000000 --- a/docs/inference/interfaces/QuestionAnswerReturn.md +++ /dev/null @@ -1,49 +0,0 @@ -# Interface: QuestionAnswerReturn - -## Properties - -### answer - -• **answer**: `string` - -A string that’s the answer within the text. - -#### Defined in - -[HfInference.ts:113](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L113) - -___ - -### end - -• **end**: `number` - -The index (string wise) of the stop of the answer within context. - -#### Defined in - -[HfInference.ts:117](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L117) - -___ - -### score - -• **score**: `number` - -A float that represents how likely that the answer is correct - -#### Defined in - -[HfInference.ts:121](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L121) - -___ - -### start - -• **start**: `number` - -The index (string wise) of the start of the answer within context. 
- -#### Defined in - -[HfInference.ts:125](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L125) diff --git a/docs/inference/interfaces/QuestionAnsweringOutput.md b/docs/inference/interfaces/QuestionAnsweringOutput.md new file mode 100644 index 000000000..9aa1e5307 --- /dev/null +++ b/docs/inference/interfaces/QuestionAnsweringOutput.md @@ -0,0 +1,49 @@ +# Interface: QuestionAnsweringOutput + +## Properties + +### answer + +• **answer**: `string` + +A string that’s the answer within the text. + +#### Defined in + +[tasks/nlp/questionAnswering.ts:16](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/questionAnswering.ts#L16) + +___ + +### end + +• **end**: `number` + +The index (string wise) of the stop of the answer within context. + +#### Defined in + +[tasks/nlp/questionAnswering.ts:20](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/questionAnswering.ts#L20) + +___ + +### score + +• **score**: `number` + +A float that represents how likely that the answer is correct + +#### Defined in + +[tasks/nlp/questionAnswering.ts:24](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/questionAnswering.ts#L24) + +___ + +### start + +• **start**: `number` + +The index (string wise) of the start of the answer within context. 
+ +#### Defined in + +[tasks/nlp/questionAnswering.ts:28](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/questionAnswering.ts#L28) diff --git a/docs/inference/interfaces/SummarizationOutput.md b/docs/inference/interfaces/SummarizationOutput.md new file mode 100644 index 000000000..37cc6490a --- /dev/null +++ b/docs/inference/interfaces/SummarizationOutput.md @@ -0,0 +1,13 @@ +# Interface: SummarizationOutput + +## Properties + +### summary\_text + +• **summary\_text**: `string` + +The string after translation + +#### Defined in + +[tasks/nlp/summarization.ts:46](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/summarization.ts#L46) diff --git a/docs/inference/interfaces/SummarizationReturn.md b/docs/inference/interfaces/SummarizationReturn.md deleted file mode 100644 index 9b878695f..000000000 --- a/docs/inference/interfaces/SummarizationReturn.md +++ /dev/null @@ -1,13 +0,0 @@ -# Interface: SummarizationReturn - -## Properties - -### summary\_text - -• **summary\_text**: `string` - -The string after translation - -#### Defined in - -[HfInference.ts:99](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L99) diff --git a/docs/inference/interfaces/TableQuestionAnswerReturn.md b/docs/inference/interfaces/TableQuestionAnswerReturn.md deleted file mode 100644 index 4964df59d..000000000 --- a/docs/inference/interfaces/TableQuestionAnswerReturn.md +++ /dev/null @@ -1,49 +0,0 @@ -# Interface: TableQuestionAnswerReturn - -## Properties - -### aggregator - -• **aggregator**: `string` - -The aggregator used to get the answer - -#### Defined in - -[HfInference.ts:145](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L145) - -___ - -### answer - -• **answer**: `string` - -The plaintext answer - -#### Defined in - 
-[HfInference.ts:149](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L149) - -___ - -### cells - -• **cells**: `string`[] - -A list of coordinates of the cells contents - -#### Defined in - -[HfInference.ts:153](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L153) - -___ - -### coordinates - -• **coordinates**: `number`[][] - -a list of coordinates of the cells referenced in the answer - -#### Defined in - -[HfInference.ts:157](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L157) diff --git a/docs/inference/interfaces/TableQuestionAnsweringOutput.md b/docs/inference/interfaces/TableQuestionAnsweringOutput.md new file mode 100644 index 000000000..fa43c20b6 --- /dev/null +++ b/docs/inference/interfaces/TableQuestionAnsweringOutput.md @@ -0,0 +1,49 @@ +# Interface: TableQuestionAnsweringOutput + +## Properties + +### aggregator + +• **aggregator**: `string` + +The aggregator used to get the answer + +#### Defined in + +[tasks/nlp/tableQuestionAnswering.ts:22](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tableQuestionAnswering.ts#L22) + +___ + +### answer + +• **answer**: `string` + +The plaintext answer + +#### Defined in + +[tasks/nlp/tableQuestionAnswering.ts:26](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tableQuestionAnswering.ts#L26) + +___ + +### cells + +• **cells**: `string`[] + +A list of coordinates of the cells contents + +#### Defined in + +[tasks/nlp/tableQuestionAnswering.ts:30](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tableQuestionAnswering.ts#L30) + +___ + +### coordinates + +• **coordinates**: `number`[][] + +a list of coordinates of the cells referenced in the answer + +#### Defined in + 
+[tasks/nlp/tableQuestionAnswering.ts:34](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tableQuestionAnswering.ts#L34) diff --git a/docs/inference/interfaces/TextGenerationOutput.md b/docs/inference/interfaces/TextGenerationOutput.md new file mode 100644 index 000000000..33e24de36 --- /dev/null +++ b/docs/inference/interfaces/TextGenerationOutput.md @@ -0,0 +1,13 @@ +# Interface: TextGenerationOutput + +## Properties + +### generated\_text + +• **generated\_text**: `string` + +The continuated string + +#### Defined in + +[tasks/nlp/textGeneration.ts:54](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGeneration.ts#L54) diff --git a/docs/inference/interfaces/TextGenerationReturn.md b/docs/inference/interfaces/TextGenerationReturn.md deleted file mode 100644 index fff537406..000000000 --- a/docs/inference/interfaces/TextGenerationReturn.md +++ /dev/null @@ -1,13 +0,0 @@ -# Interface: TextGenerationReturn - -## Properties - -### generated\_text - -• **generated\_text**: `string` - -The continuated string - -#### Defined in - -[HfInference.ts:227](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L227) diff --git a/docs/inference/interfaces/TextGenerationStreamBestOfSequence.md b/docs/inference/interfaces/TextGenerationStreamBestOfSequence.md index 8fcc240a9..138941712 100644 --- a/docs/inference/interfaces/TextGenerationStreamBestOfSequence.md +++ b/docs/inference/interfaces/TextGenerationStreamBestOfSequence.md @@ -4,13 +4,13 @@ ### finish\_reason -• **finish\_reason**: [`TextGenerationStreamFinishReason`](../enums/TextGenerationStreamFinishReason) +• **finish\_reason**: [`TextGenerationStreamFinishReason`](../modules#textgenerationstreamfinishreason) Generation finish reason #### Defined in -[HfInference.ts:260](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L260) 
+[tasks/nlp/textGenerationStream.ts:35](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L35) ___ @@ -22,7 +22,7 @@ Generated text #### Defined in -[HfInference.ts:258](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L258) +[tasks/nlp/textGenerationStream.ts:33](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L33) ___ @@ -34,7 +34,7 @@ Number of generated tokens #### Defined in -[HfInference.ts:262](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L262) +[tasks/nlp/textGenerationStream.ts:37](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L37) ___ @@ -46,7 +46,7 @@ Prompt tokens #### Defined in -[HfInference.ts:266](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L266) +[tasks/nlp/textGenerationStream.ts:41](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L41) ___ @@ -58,7 +58,7 @@ Sampling seed if sampling was activated #### Defined in -[HfInference.ts:264](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L264) +[tasks/nlp/textGenerationStream.ts:39](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L39) ___ @@ -70,4 +70,4 @@ Generated tokens #### Defined in -[HfInference.ts:268](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L268) +[tasks/nlp/textGenerationStream.ts:43](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L43) diff --git a/docs/inference/interfaces/TextGenerationStreamDetails.md b/docs/inference/interfaces/TextGenerationStreamDetails.md index 
607326511..6e5202c3b 100644 --- a/docs/inference/interfaces/TextGenerationStreamDetails.md +++ b/docs/inference/interfaces/TextGenerationStreamDetails.md @@ -10,19 +10,19 @@ Additional sequences when using the `best_of` parameter #### Defined in -[HfInference.ts:292](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L292) +[tasks/nlp/textGenerationStream.ts:66](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L66) ___ ### finish\_reason -• **finish\_reason**: [`TextGenerationStreamFinishReason`](../enums/TextGenerationStreamFinishReason) +• **finish\_reason**: [`TextGenerationStreamFinishReason`](../modules#textgenerationstreamfinishreason) Generation finish reason #### Defined in -[HfInference.ts:282](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L282) +[tasks/nlp/textGenerationStream.ts:56](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L56) ___ @@ -34,7 +34,7 @@ Number of generated tokens #### Defined in -[HfInference.ts:284](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L284) +[tasks/nlp/textGenerationStream.ts:58](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L58) ___ @@ -46,7 +46,7 @@ Prompt tokens #### Defined in -[HfInference.ts:288](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L288) +[tasks/nlp/textGenerationStream.ts:62](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L62) ___ @@ -58,7 +58,7 @@ Sampling seed if sampling was activated #### Defined in -[HfInference.ts:286](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L286) 
+[tasks/nlp/textGenerationStream.ts:60](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L60) ___ @@ -68,4 +68,4 @@ ___ #### Defined in -[HfInference.ts:290](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L290) +[tasks/nlp/textGenerationStream.ts:64](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L64) diff --git a/docs/inference/interfaces/TextGenerationStreamOutput.md b/docs/inference/interfaces/TextGenerationStreamOutput.md new file mode 100644 index 000000000..dfa748a36 --- /dev/null +++ b/docs/inference/interfaces/TextGenerationStreamOutput.md @@ -0,0 +1,39 @@ +# Interface: TextGenerationStreamOutput + +## Properties + +### details + +• **details**: ``null`` \| [`TextGenerationStreamDetails`](TextGenerationStreamDetails) + +Generation details +Only available when the generation is finished + +#### Defined in + +[tasks/nlp/textGenerationStream.ts:81](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L81) + +___ + +### generated\_text + +• **generated\_text**: ``null`` \| `string` + +Complete generated text +Only available when the generation is finished + +#### Defined in + +[tasks/nlp/textGenerationStream.ts:76](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L76) + +___ + +### token + +• **token**: [`TextGenerationStreamToken`](TextGenerationStreamToken) + +Generated token, one at a time + +#### Defined in + +[tasks/nlp/textGenerationStream.ts:71](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L71) diff --git a/docs/inference/interfaces/TextGenerationStreamPrefillToken.md b/docs/inference/interfaces/TextGenerationStreamPrefillToken.md index 52d9c9718..2ffc47c63 100644 --- 
a/docs/inference/interfaces/TextGenerationStreamPrefillToken.md +++ b/docs/inference/interfaces/TextGenerationStreamPrefillToken.md @@ -10,7 +10,7 @@ Token ID from the model tokenizer #### Defined in -[HfInference.ts:246](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L246) +[tasks/nlp/textGenerationStream.ts:21](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L21) ___ @@ -23,7 +23,7 @@ Optional since the logprob of the first token cannot be computed #### Defined in -[HfInference.ts:253](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L253) +[tasks/nlp/textGenerationStream.ts:28](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L28) ___ @@ -35,4 +35,4 @@ Token text #### Defined in -[HfInference.ts:248](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L248) +[tasks/nlp/textGenerationStream.ts:23](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L23) diff --git a/docs/inference/interfaces/TextGenerationStreamReturn.md b/docs/inference/interfaces/TextGenerationStreamReturn.md deleted file mode 100644 index a83ac1bd5..000000000 --- a/docs/inference/interfaces/TextGenerationStreamReturn.md +++ /dev/null @@ -1,39 +0,0 @@ -# Interface: TextGenerationStreamReturn - -## Properties - -### details - -• **details**: ``null`` \| [`TextGenerationStreamDetails`](TextGenerationStreamDetails) - -Generation details -Only available when the generation is finished - -#### Defined in - -[HfInference.ts:307](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L307) - -___ - -### generated\_text - -• **generated\_text**: ``null`` \| `string` - -Complete generated text -Only available when the generation is finished - 
-#### Defined in - -[HfInference.ts:302](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L302) - -___ - -### token - -• **token**: [`TextGenerationStreamToken`](TextGenerationStreamToken) - -Generated token, one at a time - -#### Defined in - -[HfInference.ts:297](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L297) diff --git a/docs/inference/interfaces/TextGenerationStreamToken.md b/docs/inference/interfaces/TextGenerationStreamToken.md index a54782d4c..6a7ec998f 100644 --- a/docs/inference/interfaces/TextGenerationStreamToken.md +++ b/docs/inference/interfaces/TextGenerationStreamToken.md @@ -10,7 +10,7 @@ Token ID from the model tokenizer #### Defined in -[HfInference.ts:232](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L232) +[tasks/nlp/textGenerationStream.ts:7](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L7) ___ @@ -22,7 +22,7 @@ Logprob #### Defined in -[HfInference.ts:236](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L236) +[tasks/nlp/textGenerationStream.ts:11](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L11) ___ @@ -35,7 +35,7 @@ Can be used to ignore tokens when concatenating #### Defined in -[HfInference.ts:241](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L241) +[tasks/nlp/textGenerationStream.ts:16](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L16) ___ @@ -47,4 +47,4 @@ Token text #### Defined in -[HfInference.ts:234](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L234) 
+[tasks/nlp/textGenerationStream.ts:9](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L9) diff --git a/docs/inference/interfaces/TokenClassificationOutputValue.md b/docs/inference/interfaces/TokenClassificationOutputValue.md new file mode 100644 index 000000000..a2fe90cc5 --- /dev/null +++ b/docs/inference/interfaces/TokenClassificationOutputValue.md @@ -0,0 +1,61 @@ +# Interface: TokenClassificationOutputValue + +## Properties + +### end + +• **end**: `number` + +The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times. + +#### Defined in + +[tasks/nlp/tokenClassification.ts:33](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tokenClassification.ts#L33) + +___ + +### entity\_group + +• **entity\_group**: `string` + +The type for the entity being recognized (model specific). + +#### Defined in + +[tasks/nlp/tokenClassification.ts:37](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tokenClassification.ts#L37) + +___ + +### score + +• **score**: `number` + +How likely the entity was recognized. + +#### Defined in + +[tasks/nlp/tokenClassification.ts:41](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tokenClassification.ts#L41) + +___ + +### start + +• **start**: `number` + +The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times. 
+ +#### Defined in + +[tasks/nlp/tokenClassification.ts:45](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tokenClassification.ts#L45) + +___ + +### word + +• **word**: `string` + +The string that was captured + +#### Defined in + +[tasks/nlp/tokenClassification.ts:49](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tokenClassification.ts#L49) diff --git a/docs/inference/interfaces/TokenClassificationReturnValue.md b/docs/inference/interfaces/TokenClassificationReturnValue.md deleted file mode 100644 index fffaefeb1..000000000 --- a/docs/inference/interfaces/TokenClassificationReturnValue.md +++ /dev/null @@ -1,61 +0,0 @@ -# Interface: TokenClassificationReturnValue - -## Properties - -### end - -• **end**: `number` - -The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times. - -#### Defined in - -[HfInference.ts:337](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L337) - -___ - -### entity\_group - -• **entity\_group**: `string` - -The type for the entity being recognized (model specific). - -#### Defined in - -[HfInference.ts:341](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L341) - -___ - -### score - -• **score**: `number` - -How likely the entity was recognized. - -#### Defined in - -[HfInference.ts:345](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L345) - -___ - -### start - -• **start**: `number` - -The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times. 
- -#### Defined in - -[HfInference.ts:349](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L349) - -___ - -### word - -• **word**: `string` - -The string that was captured - -#### Defined in - -[HfInference.ts:353](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L353) diff --git a/docs/inference/interfaces/TranslationOutput.md b/docs/inference/interfaces/TranslationOutput.md new file mode 100644 index 000000000..bc375c991 --- /dev/null +++ b/docs/inference/interfaces/TranslationOutput.md @@ -0,0 +1,13 @@ +# Interface: TranslationOutput + +## Properties + +### translation\_text + +• **translation\_text**: `string` + +The string after translation + +#### Defined in + +[tasks/nlp/translation.ts:16](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/translation.ts#L16) diff --git a/docs/inference/interfaces/TranslationReturn.md b/docs/inference/interfaces/TranslationReturn.md deleted file mode 100644 index 6898a3ac3..000000000 --- a/docs/inference/interfaces/TranslationReturn.md +++ /dev/null @@ -1,13 +0,0 @@ -# Interface: TranslationReturn - -## Properties - -### translation\_text - -• **translation\_text**: `string` - -The string after translation - -#### Defined in - -[HfInference.ts:369](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L369) diff --git a/docs/inference/interfaces/ZeroShotClassificationOutputValue.md b/docs/inference/interfaces/ZeroShotClassificationOutputValue.md new file mode 100644 index 000000000..e6f3f5f01 --- /dev/null +++ b/docs/inference/interfaces/ZeroShotClassificationOutputValue.md @@ -0,0 +1,31 @@ +# Interface: ZeroShotClassificationOutputValue + +## Properties + +### labels + +• **labels**: `string`[] + +#### Defined in + 
+[tasks/nlp/zeroShotClassification.ts:24](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/zeroShotClassification.ts#L24) + +___ + +### scores + +• **scores**: `number`[] + +#### Defined in + +[tasks/nlp/zeroShotClassification.ts:25](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/zeroShotClassification.ts#L25) + +___ + +### sequence + +• **sequence**: `string` + +#### Defined in + +[tasks/nlp/zeroShotClassification.ts:26](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/zeroShotClassification.ts#L26) diff --git a/docs/inference/interfaces/ZeroShotClassificationReturnValue.md b/docs/inference/interfaces/ZeroShotClassificationReturnValue.md deleted file mode 100644 index 7ae4da94f..000000000 --- a/docs/inference/interfaces/ZeroShotClassificationReturnValue.md +++ /dev/null @@ -1,31 +0,0 @@ -# Interface: ZeroShotClassificationReturnValue - -## Properties - -### labels - -• **labels**: `string`[] - -#### Defined in - -[HfInference.ts:390](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L390) - -___ - -### scores - -• **scores**: `number`[] - -#### Defined in - -[HfInference.ts:391](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L391) - -___ - -### sequence - -• **sequence**: `string` - -#### Defined in - -[HfInference.ts:392](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L392) diff --git a/docs/inference/modules.md b/docs/inference/modules.md index 3b76a946d..ef16cf164 100644 --- a/docs/inference/modules.md +++ b/docs/inference/modules.md @@ -1,315 +1,878 @@ # @huggingface/inference -## Enumerations - -- [TextGenerationStreamFinishReason](enums/TextGenerationStreamFinishReason) - ## Classes - [HfInference](classes/HfInference) +- [HfInferenceEndpoint](classes/HfInferenceEndpoint) ## Interfaces -- 
[Args](interfaces/Args) -- [AudioClassificationReturnValue](interfaces/AudioClassificationReturnValue) -- [AutomaticSpeechRecognitionReturn](interfaces/AutomaticSpeechRecognitionReturn) -- [ConversationalReturn](interfaces/ConversationalReturn) -- [ImageClassificationReturnValue](interfaces/ImageClassificationReturnValue) -- [ImageSegmentationReturnValue](interfaces/ImageSegmentationReturnValue) -- [ImageToTextReturn](interfaces/ImageToTextReturn) -- [ObjectDetectionReturnValue](interfaces/ObjectDetectionReturnValue) +- [AudioClassificationOutputValue](interfaces/AudioClassificationOutputValue) +- [AutomaticSpeechRecognitionOutput](interfaces/AutomaticSpeechRecognitionOutput) +- [BaseArgs](interfaces/BaseArgs) +- [ConversationalOutput](interfaces/ConversationalOutput) +- [ImageClassificationOutputValue](interfaces/ImageClassificationOutputValue) +- [ImageSegmentationOutputValue](interfaces/ImageSegmentationOutputValue) +- [ImageToTextOutput](interfaces/ImageToTextOutput) +- [ObjectDetectionOutputValue](interfaces/ObjectDetectionOutputValue) - [Options](interfaces/Options) -- [QuestionAnswerReturn](interfaces/QuestionAnswerReturn) -- [SummarizationReturn](interfaces/SummarizationReturn) -- [TableQuestionAnswerReturn](interfaces/TableQuestionAnswerReturn) -- [TextGenerationReturn](interfaces/TextGenerationReturn) +- [QuestionAnsweringOutput](interfaces/QuestionAnsweringOutput) +- [SummarizationOutput](interfaces/SummarizationOutput) +- [TableQuestionAnsweringOutput](interfaces/TableQuestionAnsweringOutput) +- [TextGenerationOutput](interfaces/TextGenerationOutput) - [TextGenerationStreamBestOfSequence](interfaces/TextGenerationStreamBestOfSequence) - [TextGenerationStreamDetails](interfaces/TextGenerationStreamDetails) +- [TextGenerationStreamOutput](interfaces/TextGenerationStreamOutput) - [TextGenerationStreamPrefillToken](interfaces/TextGenerationStreamPrefillToken) -- [TextGenerationStreamReturn](interfaces/TextGenerationStreamReturn) - 
[TextGenerationStreamToken](interfaces/TextGenerationStreamToken) -- [TokenClassificationReturnValue](interfaces/TokenClassificationReturnValue) -- [TranslationReturn](interfaces/TranslationReturn) -- [ZeroShotClassificationReturnValue](interfaces/ZeroShotClassificationReturnValue) +- [TokenClassificationOutputValue](interfaces/TokenClassificationOutputValue) +- [TranslationOutput](interfaces/TranslationOutput) +- [ZeroShotClassificationOutputValue](interfaces/ZeroShotClassificationOutputValue) ## Type Aliases ### AudioClassificationArgs -Ƭ **AudioClassificationArgs**: [`Args`](interfaces/Args) & { `data`: `Blob` \| `ArrayBuffer` } +Ƭ **AudioClassificationArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `data`: `Blob` \| `ArrayBuffer` } #### Defined in -[HfInference.ts:558](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L558) +[tasks/audio/audioClassification.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/audioClassification.ts#L5) ___ ### AudioClassificationReturn -Ƭ **AudioClassificationReturn**: [`AudioClassificationReturnValue`](interfaces/AudioClassificationReturnValue)[] +Ƭ **AudioClassificationReturn**: [`AudioClassificationOutputValue`](interfaces/AudioClassificationOutputValue)[] #### Defined in -[HfInference.ts:577](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L577) +[tasks/audio/audioClassification.ts:24](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/audioClassification.ts#L24) ___ ### AutomaticSpeechRecognitionArgs -Ƭ **AutomaticSpeechRecognitionArgs**: [`Args`](interfaces/Args) & { `data`: `Blob` \| `ArrayBuffer` } +Ƭ **AutomaticSpeechRecognitionArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `data`: `Blob` \| `ArrayBuffer` } #### Defined in -[HfInference.ts:544](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L544) 
+[tasks/audio/automaticSpeechRecognition.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/automaticSpeechRecognition.ts#L5) ___ ### ConversationalArgs -Ƭ **ConversationalArgs**: [`Args`](interfaces/Args) & { `inputs`: { `generated_responses?`: `string`[] ; `past_user_inputs?`: `string`[] ; `text`: `string` } ; `parameters?`: { `max_length?`: `number` ; `max_time?`: `number` ; `min_length?`: `number` ; `repetition_penalty?`: `number` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } } +Ƭ **ConversationalArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: { `generated_responses?`: `string`[] ; `past_user_inputs?`: `string`[] ; `text`: `string` } ; `parameters?`: { `max_length?`: `number` ; `max_time?`: `number` ; `min_length?`: `number` ; `repetition_penalty?`: `number` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } } #### Defined in -[HfInference.ts:397](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L397) +[tasks/nlp/conversational.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/conversational.ts#L5) ___ ### FeatureExtractionArgs -Ƭ **FeatureExtractionArgs**: [`Args`](interfaces/Args) & { `inputs`: `Record`<`string`, `unknown`\> \| `Record`<`string`, `unknown`\>[] } +Ƭ **FeatureExtractionArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: `string` \| `string`[] } #### Defined in -[HfInference.ts:453](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L453) +[tasks/nlp/featureExtraction.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/featureExtraction.ts#L5) ___ -### FeatureExtractionReturn +### FeatureExtractionOutput -Ƭ **FeatureExtractionReturn**: (`number` \| `number`[])[] +Ƭ **FeatureExtractionOutput**: (`number` \| `number`[])[] Returned values are a list of floats, or a list of 
list of floats (depending on if you sent a string or a list of string, and if the automatic reduction, usually mean_pooling for instance was applied for you or not. This should be explained on the model's README. #### Defined in -[HfInference.ts:468](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L468) +[tasks/nlp/featureExtraction.ts:18](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/featureExtraction.ts#L18) ___ ### FillMaskArgs -Ƭ **FillMaskArgs**: [`Args`](interfaces/Args) & { `inputs`: `string` } +Ƭ **FillMaskArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: `string` } #### Defined in -[HfInference.ts:35](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L35) +[tasks/nlp/fillMask.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/fillMask.ts#L5) ___ -### FillMaskReturn +### FillMaskOutput -Ƭ **FillMaskReturn**: { `score`: `number` ; `sequence`: `string` ; `token`: `number` ; `token_str`: `string` }[] +Ƭ **FillMaskOutput**: { `score`: `number` ; `sequence`: `string` ; `token`: `number` ; `token_str`: `string` }[] #### Defined in -[HfInference.ts:39](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L39) +[tasks/nlp/fillMask.ts:9](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/fillMask.ts#L9) ___ ### ImageClassificationArgs -Ƭ **ImageClassificationArgs**: [`Args`](interfaces/Args) & { `data`: `Blob` \| `ArrayBuffer` } +Ƭ **ImageClassificationArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `data`: `Blob` \| `ArrayBuffer` } #### Defined in -[HfInference.ts:470](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L470) 
+[tasks/cv/imageClassification.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageClassification.ts#L5) ___ -### ImageClassificationReturn +### ImageClassificationOutput -Ƭ **ImageClassificationReturn**: [`ImageClassificationReturnValue`](interfaces/ImageClassificationReturnValue)[] +Ƭ **ImageClassificationOutput**: [`ImageClassificationOutputValue`](interfaces/ImageClassificationOutputValue)[] #### Defined in -[HfInference.ts:488](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L488) +[tasks/cv/imageClassification.ts:23](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageClassification.ts#L23) ___ ### ImageSegmentationArgs -Ƭ **ImageSegmentationArgs**: [`Args`](interfaces/Args) & { `data`: `Blob` \| `ArrayBuffer` } +Ƭ **ImageSegmentationArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `data`: `Blob` \| `ArrayBuffer` } #### Defined in -[HfInference.ts:520](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L520) +[tasks/cv/imageSegmentation.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageSegmentation.ts#L5) ___ -### ImageSegmentationReturn +### ImageSegmentationOutput -Ƭ **ImageSegmentationReturn**: [`ImageSegmentationReturnValue`](interfaces/ImageSegmentationReturnValue)[] +Ƭ **ImageSegmentationOutput**: [`ImageSegmentationOutputValue`](interfaces/ImageSegmentationOutputValue)[] #### Defined in -[HfInference.ts:542](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L542) +[tasks/cv/imageSegmentation.ts:27](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageSegmentation.ts#L27) ___ ### ImageToTextArgs -Ƭ **ImageToTextArgs**: [`Args`](interfaces/Args) & { `data`: `Blob` \| `ArrayBuffer` } +Ƭ **ImageToTextArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `data`: 
`Blob` \| `ArrayBuffer` } #### Defined in -[HfInference.ts:611](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L611) +[tasks/cv/imageToText.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageToText.ts#L5) ___ ### ObjectDetectionArgs -Ƭ **ObjectDetectionArgs**: [`Args`](interfaces/Args) & { `data`: `Blob` \| `ArrayBuffer` } +Ƭ **ObjectDetectionArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `data`: `Blob` \| `ArrayBuffer` } + +#### Defined in + +[tasks/cv/objectDetection.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/objectDetection.ts#L5) + +___ + +### ObjectDetectionOutput + +Ƭ **ObjectDetectionOutput**: [`ObjectDetectionOutputValue`](interfaces/ObjectDetectionOutputValue)[] + +#### Defined in + +[tasks/cv/objectDetection.ts:33](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/objectDetection.ts#L33) + +___ + +### QuestionAnsweringArgs + +Ƭ **QuestionAnsweringArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: { `context`: `string` ; `question`: `string` } } + +#### Defined in + +[tasks/nlp/questionAnswering.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/questionAnswering.ts#L5) + +___ + +### RequestArgs + +Ƭ **RequestArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `data`: `Blob` \| `ArrayBuffer` } \| { `inputs`: `unknown` } & { `accessToken?`: `string` ; `parameters?`: `Record`<`string`, `unknown`\> } #### Defined in -[HfInference.ts:490](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L490) +[types.ts:38](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/types.ts#L38) ___ -### ObjectDetectionReturn +### SentenceSimilarityArgs -Ƭ **ObjectDetectionReturn**: [`ObjectDetectionReturnValue`](interfaces/ObjectDetectionReturnValue)[] +Ƭ **SentenceSimilarityArgs**: 
[`BaseArgs`](interfaces/BaseArgs) & { `inputs`: `Record`<`string`, `unknown`\> \| `Record`<`string`, `unknown`\>[] } #### Defined in -[HfInference.ts:518](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L518) +[tasks/nlp/sentenceSimilarity.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/sentenceSimilarity.ts#L5) ___ -### QuestionAnswerArgs +### SentenceSimilarityOutput + +Ƭ **SentenceSimilarityOutput**: `number`[] -Ƭ **QuestionAnswerArgs**: [`Args`](interfaces/Args) & { `inputs`: { `context`: `string` ; `question`: `string` } } +Returned values are a list of floats #### Defined in -[HfInference.ts:102](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L102) +[tasks/nlp/sentenceSimilarity.ts:20](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/sentenceSimilarity.ts#L20) ___ ### SummarizationArgs -Ƭ **SummarizationArgs**: [`Args`](interfaces/Args) & { `inputs`: `string` ; `parameters?`: { `max_length?`: `number` ; `max_time?`: `number` ; `min_length?`: `number` ; `repetition_penalty?`: `number` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } } +Ƭ **SummarizationArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: `string` ; `parameters?`: { `max_length?`: `number` ; `max_time?`: `number` ; `min_length?`: `number` ; `repetition_penalty?`: `number` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } } #### Defined in -[HfInference.ts:58](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L58) +[tasks/nlp/summarization.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/summarization.ts#L5) ___ -### TableQuestionAnswerArgs +### TableQuestionAnsweringArgs -Ƭ **TableQuestionAnswerArgs**: [`Args`](interfaces/Args) & { `inputs`: { `query`: `string` ; `table`: 
`Record`<`string`, `string`[]\> } } +Ƭ **TableQuestionAnsweringArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: { `query`: `string` ; `table`: `Record`<`string`, `string`[]\> } } #### Defined in -[HfInference.ts:128](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L128) +[tasks/nlp/tableQuestionAnswering.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tableQuestionAnswering.ts#L5) ___ ### TextClassificationArgs -Ƭ **TextClassificationArgs**: [`Args`](interfaces/Args) & { `inputs`: `string` } +Ƭ **TextClassificationArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: `string` } #### Defined in -[HfInference.ts:160](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L160) +[tasks/nlp/textClassification.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textClassification.ts#L5) ___ -### TextClassificationReturn +### TextClassificationOutput -Ƭ **TextClassificationReturn**: { `label`: `string` ; `score`: `number` }[] +Ƭ **TextClassificationOutput**: { `label`: `string` ; `score`: `number` }[] #### Defined in -[HfInference.ts:167](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L167) +[tasks/nlp/textClassification.ts:12](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textClassification.ts#L12) ___ ### TextGenerationArgs -Ƭ **TextGenerationArgs**: [`Args`](interfaces/Args) & { `inputs`: `string` ; `parameters?`: { `do_sample?`: `boolean` ; `max_new_tokens?`: `number` ; `max_time?`: `number` ; `num_return_sequences?`: `number` ; `repetition_penalty?`: `number` ; `return_full_text?`: `boolean` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } } +Ƭ **TextGenerationArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: `string` ; `parameters?`: { `do_sample?`: `boolean` ; 
`max_new_tokens?`: `number` ; `max_time?`: `number` ; `num_return_sequences?`: `number` ; `repetition_penalty?`: `number` ; `return_full_text?`: `boolean` ; `temperature?`: `number` ; `top_k?`: `number` ; `top_p?`: `number` } } #### Defined in -[HfInference.ts:178](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L178) +[tasks/nlp/textGeneration.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGeneration.ts#L5) + +___ + +### TextGenerationStreamFinishReason + +Ƭ **TextGenerationStreamFinishReason**: ``"length"`` \| ``"eos_token"`` \| ``"stop_sequence"`` + +#### Defined in + +[tasks/nlp/textGenerationStream.ts:46](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L46) ___ ### TextToImageArgs -Ƭ **TextToImageArgs**: [`Args`](interfaces/Args) & { `inputs`: `string` ; `parameters?`: { `guidance_scale?`: `number` ; `height?`: `number` ; `negative_prompt?`: `string` ; `num_inference_steps?`: `number` ; `width?`: `number` } } +Ƭ **TextToImageArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: `string` ; `parameters?`: { `guidance_scale?`: `number` ; `height?`: `number` ; `negative_prompt?`: `string` ; `num_inference_steps?`: `number` ; `width?`: `number` } } #### Defined in -[HfInference.ts:579](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L579) +[tasks/cv/textToImage.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/textToImage.ts#L5) ___ -### TextToImageReturn +### TextToImageOutput -Ƭ **TextToImageReturn**: `Blob` +Ƭ **TextToImageOutput**: `Blob` #### Defined in -[HfInference.ts:609](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L609) +[tasks/cv/textToImage.ts:35](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/textToImage.ts#L35) 
___ ### TokenClassificationArgs -Ƭ **TokenClassificationArgs**: [`Args`](interfaces/Args) & { `inputs`: `string` ; `parameters?`: { `aggregation_strategy?`: ``"none"`` \| ``"simple"`` \| ``"first"`` \| ``"average"`` \| ``"max"`` } } +Ƭ **TokenClassificationArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: `string` ; `parameters?`: { `aggregation_strategy?`: ``"none"`` \| ``"simple"`` \| ``"first"`` \| ``"average"`` \| ``"max"`` } } #### Defined in -[HfInference.ts:310](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L310) +[tasks/nlp/tokenClassification.ts:6](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tokenClassification.ts#L6) ___ -### TokenClassificationReturn +### TokenClassificationOutput -Ƭ **TokenClassificationReturn**: [`TokenClassificationReturnValue`](interfaces/TokenClassificationReturnValue)[] +Ƭ **TokenClassificationOutput**: [`TokenClassificationOutputValue`](interfaces/TokenClassificationOutputValue)[] #### Defined in -[HfInference.ts:356](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L356) +[tasks/nlp/tokenClassification.ts:52](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tokenClassification.ts#L52) ___ ### TranslationArgs -Ƭ **TranslationArgs**: [`Args`](interfaces/Args) & { `inputs`: `string` } +Ƭ **TranslationArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: `string` } #### Defined in -[HfInference.ts:358](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L358) +[tasks/nlp/translation.ts:5](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/translation.ts#L5) ___ ### ZeroShotClassificationArgs -Ƭ **ZeroShotClassificationArgs**: [`Args`](interfaces/Args) & { `inputs`: `string` \| `string`[] ; `parameters`: { `candidate_labels`: `string`[] ; `multi_label?`: `boolean` } } +Ƭ 
**ZeroShotClassificationArgs**: [`BaseArgs`](interfaces/BaseArgs) & { `inputs`: `string` \| `string`[] ; `parameters`: { `candidate_labels`: `string`[] ; `multi_label?`: `boolean` } } + +#### Defined in + +[tasks/nlp/zeroShotClassification.ts:6](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/zeroShotClassification.ts#L6) + +___ + +### ZeroShotClassificationOutput + +Ƭ **ZeroShotClassificationOutput**: [`ZeroShotClassificationOutputValue`](interfaces/ZeroShotClassificationOutputValue)[] + +#### Defined in + +[tasks/nlp/zeroShotClassification.ts:29](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/zeroShotClassification.ts#L29) + +## Functions + +### audioClassification + +▸ **audioClassification**(`args`, `options?`): `Promise`<[`AudioClassificationReturn`](modules#audioclassificationreturn)\> + +This task reads some audio input and outputs the likelihood of classes. +Recommended model: superb/hubert-large-superb-er + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`AudioClassificationArgs`](modules#audioclassificationargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`AudioClassificationReturn`](modules#audioclassificationreturn)\> + +#### Defined in + +[tasks/audio/audioClassification.ts:30](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/audioClassification.ts#L30) + +___ + +### automaticSpeechRecognition + +▸ **automaticSpeechRecognition**(`args`, `options?`): `Promise`<[`AutomaticSpeechRecognitionOutput`](interfaces/AutomaticSpeechRecognitionOutput)\> + +This task reads some audio input and outputs the said words within the audio files. 
+Recommended model (english language): facebook/wav2vec2-large-960h-lv60-self + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`AutomaticSpeechRecognitionArgs`](modules#automaticspeechrecognitionargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`AutomaticSpeechRecognitionOutput`](interfaces/AutomaticSpeechRecognitionOutput)\> + +#### Defined in + +[tasks/audio/automaticSpeechRecognition.ts:23](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/audio/automaticSpeechRecognition.ts#L23) + +___ + +### conversational + +▸ **conversational**(`args`, `options?`): `Promise`<[`ConversationalOutput`](interfaces/ConversationalOutput)\> + +This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large. + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`ConversationalArgs`](modules#conversationalargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`ConversationalOutput`](interfaces/ConversationalOutput)\> + +#### Defined in + +[tasks/nlp/conversational.ts:65](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/conversational.ts#L65) + +___ + +### featureExtraction + +▸ **featureExtraction**(`args`, `options?`): `Promise`<[`FeatureExtractionOutput`](modules#featureextractionoutput)\> + +This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search. 
+ +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`FeatureExtractionArgs`](modules#featureextractionargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`FeatureExtractionOutput`](modules#featureextractionoutput)\> + +#### Defined in + +[tasks/nlp/featureExtraction.ts:23](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/featureExtraction.ts#L23) + +___ + +### fillMask + +▸ **fillMask**(`args`, `options?`): `Promise`<[`FillMaskOutput`](modules#fillmaskoutput)\> + +Tries to fill in a hole with a missing word (token to be precise). That’s the base task for BERT models. + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`FillMaskArgs`](modules#fillmaskargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`FillMaskOutput`](modules#fillmaskoutput)\> + +#### Defined in + +[tasks/nlp/fillMask.ts:31](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/fillMask.ts#L31) + +___ + +### imageClassification + +▸ **imageClassification**(`args`, `options?`): `Promise`<[`ImageClassificationOutput`](modules#imageclassificationoutput)\> + +This task reads some image input and outputs the likelihood of classes. 
+Recommended model: google/vit-base-patch16-224 + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`ImageClassificationArgs`](modules#imageclassificationargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`ImageClassificationOutput`](modules#imageclassificationoutput)\> + +#### Defined in + +[tasks/cv/imageClassification.ts:29](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageClassification.ts#L29) + +___ + +### imageSegmentation + +▸ **imageSegmentation**(`args`, `options?`): `Promise`<[`ImageSegmentationOutput`](modules#imagesegmentationoutput)\> + +This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects. +Recommended model: facebook/detr-resnet-50-panoptic + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`ImageSegmentationArgs`](modules#imagesegmentationargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`ImageSegmentationOutput`](modules#imagesegmentationoutput)\> + +#### Defined in + +[tasks/cv/imageSegmentation.ts:33](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageSegmentation.ts#L33) + +___ + +### imageToText + +▸ **imageToText**(`args`, `options?`): `Promise`<[`ImageToTextOutput`](interfaces/ImageToTextOutput)\> + +This task reads some image input and outputs the text caption. 
+ +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`ImageToTextArgs`](modules#imagetotextargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`ImageToTextOutput`](interfaces/ImageToTextOutput)\> + +#### Defined in + +[tasks/cv/imageToText.ts:22](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/imageToText.ts#L22) + +___ + +### objectDetection + +▸ **objectDetection**(`args`, `options?`): `Promise`<[`ObjectDetectionOutput`](modules#objectdetectionoutput)\> + +This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects. +Recommended model: facebook/detr-resnet-50 + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`ObjectDetectionArgs`](modules#objectdetectionargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`ObjectDetectionOutput`](modules#objectdetectionoutput)\> + +#### Defined in + +[tasks/cv/objectDetection.ts:39](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/objectDetection.ts#L39) + +___ + +### questionAnswering + +▸ **questionAnswering**(`args`, `options?`): `Promise`<[`QuestionAnsweringOutput`](interfaces/QuestionAnsweringOutput)\> + +Want to have a nice know-it-all bot that can answer any question? 
Recommended model: deepset/roberta-base-squad2 + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`QuestionAnsweringArgs`](modules#questionansweringargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`QuestionAnsweringOutput`](interfaces/QuestionAnsweringOutput)\> + +#### Defined in + +[tasks/nlp/questionAnswering.ts:34](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/questionAnswering.ts#L34) + +___ + +### request + +▸ **request**<`T`\>(`args`, `options?`): `Promise`<`T`\> + +Primitive to make custom calls to the inference API + +#### Type parameters + +| Name | +| :------ | +| `T` | + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`RequestArgs`](modules#requestargs) | +| `options?` | [`Options`](interfaces/Options) & { `includeCredentials?`: `boolean` } | + +#### Returns + +`Promise`<`T`\> #### Defined in -[HfInference.ts:372](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L372) +[tasks/custom/request.ts:7](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/custom/request.ts#L7) ___ -### ZeroShotClassificationReturn +### sentenceSimilarity + +▸ **sentenceSimilarity**(`args`, `options?`): `Promise`<[`SentenceSimilarityOutput`](modules#sentencesimilarityoutput)\> + +Calculate the semantic similarity between one text and a list of other sentences by comparing their embeddings. 
+ +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`SentenceSimilarityArgs`](modules#sentencesimilarityargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`SentenceSimilarityOutput`](modules#sentencesimilarityoutput)\> + +#### Defined in + +[tasks/nlp/sentenceSimilarity.ts:25](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/sentenceSimilarity.ts#L25) + +___ + +### streamingRequest + +▸ **streamingRequest**<`T`\>(`args`, `options?`): `AsyncGenerator`<`T`\> + +Primitive to make custom inference calls that expect server-sent events, and returns the response through a generator + +#### Type parameters + +| Name | +| :------ | +| `T` | + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`RequestArgs`](modules#requestargs) | +| `options?` | [`Options`](interfaces/Options) & { `includeCredentials?`: `boolean` } | + +#### Returns + +`AsyncGenerator`<`T`\> + +#### Defined in + +[tasks/custom/streamingRequest.ts:9](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/custom/streamingRequest.ts#L9) + +___ + +### summarization + +▸ **summarization**(`args`, `options?`): `Promise`<[`SummarizationOutput`](interfaces/SummarizationOutput)\> + +This task is well known to summarize longer text into shorter text. Be careful, some models have a maximum length of input. That means that the summary cannot handle full books for instance. Be careful when choosing your model. 
+ +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`SummarizationArgs`](modules#summarizationargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`SummarizationOutput`](interfaces/SummarizationOutput)\> + +#### Defined in + +[tasks/nlp/summarization.ts:52](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/summarization.ts#L52) + +___ + +### tableQuestionAnswering + +▸ **tableQuestionAnswering**(`args`, `options?`): `Promise`<[`TableQuestionAnsweringOutput`](interfaces/TableQuestionAnsweringOutput)\> + +Don’t know SQL? Don’t want to dive into a large spreadsheet? Ask questions in plain English! Recommended model: google/tapas-base-finetuned-wtq. + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`TableQuestionAnsweringArgs`](modules#tablequestionansweringargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`TableQuestionAnsweringOutput`](interfaces/TableQuestionAnsweringOutput)\> + +#### Defined in + +[tasks/nlp/tableQuestionAnswering.ts:40](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tableQuestionAnswering.ts#L40) + +___ + +### textClassification + +▸ **textClassification**(`args`, `options?`): `Promise`<[`TextClassificationOutput`](modules#textclassificationoutput)\> + +Usually used for sentiment-analysis, this will output the likelihood of classes of an input. 
Recommended model: distilbert-base-uncased-finetuned-sst-2-english + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`TextClassificationArgs`](modules#textclassificationargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`TextClassificationOutput`](modules#textclassificationoutput)\> + +#### Defined in + +[tasks/nlp/textClassification.ts:26](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textClassification.ts#L26) + +___ + +### textGeneration + +▸ **textGeneration**(`args`, `options?`): `Promise`<[`TextGenerationOutput`](interfaces/TextGenerationOutput)\> + +Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with). + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`TextGenerationArgs`](modules#textgenerationargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`TextGenerationOutput`](interfaces/TextGenerationOutput)\> + +#### Defined in + +[tasks/nlp/textGeneration.ts:60](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGeneration.ts#L60) + +___ + +### textGenerationStream + +▸ **textGenerationStream**(`args`, `options?`): `AsyncGenerator`<[`TextGenerationStreamOutput`](interfaces/TextGenerationStreamOutput)\> + +Use to continue text from a prompt. 
Same as `textGeneration` but returns generator that can be read one token at a time + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`TextGenerationArgs`](modules#textgenerationargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`AsyncGenerator`<[`TextGenerationStreamOutput`](interfaces/TextGenerationStreamOutput)\> + +#### Defined in + +[tasks/nlp/textGenerationStream.ts:87](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/textGenerationStream.ts#L87) + +___ + +### textToImage + +▸ **textToImage**(`args`, `options?`): `Promise`<[`TextToImageOutput`](modules#texttoimageoutput)\> + +This task reads some text input and outputs an image. +Recommended model: stabilityai/stable-diffusion-2 + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`TextToImageArgs`](modules#texttoimageargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`TextToImageOutput`](modules#texttoimageoutput)\> + +#### Defined in + +[tasks/cv/textToImage.ts:41](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/cv/textToImage.ts#L41) + +___ + +### tokenClassification + +▸ **tokenClassification**(`args`, `options?`): `Promise`<[`TokenClassificationOutput`](modules#tokenclassificationoutput)\> + +Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. 
Recommended model: dbmdz/bert-large-cased-finetuned-conll03-english + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`TokenClassificationArgs`](modules#tokenclassificationargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`TokenClassificationOutput`](modules#tokenclassificationoutput)\> + +#### Defined in + +[tasks/nlp/tokenClassification.ts:57](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/tokenClassification.ts#L57) + +___ + +### translation + +▸ **translation**(`args`, `options?`): `Promise`<[`TranslationOutput`](interfaces/TranslationOutput)\> + +This task is well known to translate text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en. + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`TranslationArgs`](modules#translationargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns + +`Promise`<[`TranslationOutput`](interfaces/TranslationOutput)\> + +#### Defined in + +[tasks/nlp/translation.ts:22](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/translation.ts#L22) + +___ + +### zeroShotClassification + +▸ **zeroShotClassification**(`args`, `options?`): `Promise`<[`ZeroShotClassificationOutput`](modules#zeroshotclassificationoutput)\> + +This task is super useful to try out classification with zero code, you simply pass a sentence/paragraph and the possible labels for that sentence, and you get a result. Recommended model: facebook/bart-large-mnli. 
+ +#### Parameters + +| Name | Type | +| :------ | :------ | +| `args` | [`ZeroShotClassificationArgs`](modules#zeroshotclassificationargs) | +| `options?` | [`Options`](interfaces/Options) | + +#### Returns -Ƭ **ZeroShotClassificationReturn**: [`ZeroShotClassificationReturnValue`](interfaces/ZeroShotClassificationReturnValue)[] +`Promise`<[`ZeroShotClassificationOutput`](modules#zeroshotclassificationoutput)\> #### Defined in -[HfInference.ts:395](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/HfInference.ts#L395) +[tasks/nlp/zeroShotClassification.ts:34](https://github.com/huggingface/huggingface.js/blob/main/packages/inference/src/tasks/nlp/zeroShotClassification.ts#L34) diff --git a/packages/inference/package.json b/packages/inference/package.json index 287eb7bfe..4423580bd 100644 --- a/packages/inference/package.json +++ b/packages/inference/package.json @@ -1,6 +1,6 @@ { "name": "@huggingface/inference", - "version": "1.8.0", + "version": "2.0.0", "license": "MIT", "author": "Tim Mikeladze ", "description": "Typescript wrapper for the Hugging Face Inference API",