Skip to content

Commit f50fe7c

Browse files
HuggingFaceInfra, Wauplin, and hanouticelina authored
[Bot] Update inference types (#2688)
* Update inference types (automated commit)
* fix quality after merging main
* another fix
* fix tests
* Update inference types (automated commit)
* Update inference types (automated commit)
* fix quality
* Update inference types (automated commit)
* Update inference types (automated commit)
* Update inference types (automated commit)
* fix client
* activate automatic update for table-question-answering
* fix

---------

Co-authored-by: Wauplin <11801849+Wauplin@users.noreply.github.com>
Co-authored-by: Celina Hanouti <hanouticelina@gmail.com>
1 parent 503d353 commit f50fe7c

File tree

8 files changed

+64
-9
lines changed

8 files changed

+64
-9
lines changed

docs/source/en/package_reference/inference_types.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -239,6 +239,8 @@ This part of the lib is still under development and will be improved in future r
239239

240240
[[autodoc]] huggingface_hub.TableQuestionAnsweringOutputElement
241241

242+
[[autodoc]] huggingface_hub.TableQuestionAnsweringParameters
243+
242244

243245

244246
## text2text_generation

docs/source/ko/package_reference/inference_types.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -238,6 +238,8 @@ rendered properly in your Markdown viewer.
238238

239239
[[autodoc]] huggingface_hub.TableQuestionAnsweringOutputElement
240240

241+
[[autodoc]] huggingface_hub.TableQuestionAnsweringParameters
242+
241243

242244

243245
## text2text_generation[[huggingface_hub.Text2TextGenerationInput]]

src/huggingface_hub/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -351,6 +351,7 @@
351351
"ObjectDetectionInput",
352352
"ObjectDetectionOutputElement",
353353
"ObjectDetectionParameters",
354+
"Padding",
354355
"QuestionAnsweringInput",
355356
"QuestionAnsweringInputData",
356357
"QuestionAnsweringOutputElement",
@@ -364,6 +365,7 @@
364365
"TableQuestionAnsweringInput",
365366
"TableQuestionAnsweringInputData",
366367
"TableQuestionAnsweringOutputElement",
368+
"TableQuestionAnsweringParameters",
367369
"Text2TextGenerationInput",
368370
"Text2TextGenerationOutput",
369371
"Text2TextGenerationParameters",
@@ -880,6 +882,7 @@ def __dir__():
880882
ObjectDetectionInput, # noqa: F401
881883
ObjectDetectionOutputElement, # noqa: F401
882884
ObjectDetectionParameters, # noqa: F401
885+
Padding, # noqa: F401
883886
QuestionAnsweringInput, # noqa: F401
884887
QuestionAnsweringInputData, # noqa: F401
885888
QuestionAnsweringOutputElement, # noqa: F401
@@ -893,6 +896,7 @@ def __dir__():
893896
TableQuestionAnsweringInput, # noqa: F401
894897
TableQuestionAnsweringInputData, # noqa: F401
895898
TableQuestionAnsweringOutputElement, # noqa: F401
899+
TableQuestionAnsweringParameters, # noqa: F401
896900
Text2TextGenerationInput, # noqa: F401
897901
Text2TextGenerationOutput, # noqa: F401
898902
Text2TextGenerationParameters, # noqa: F401

src/huggingface_hub/inference/_client.py

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -84,6 +84,7 @@
8484
ImageToImageTargetSize,
8585
ImageToTextOutput,
8686
ObjectDetectionOutputElement,
87+
Padding,
8788
QuestionAnsweringOutputElement,
8889
SummarizationOutput,
8990
SummarizationTruncationStrategy,
@@ -1654,7 +1655,9 @@ def table_question_answering(
16541655
query: str,
16551656
*,
16561657
model: Optional[str] = None,
1657-
parameters: Optional[Dict[str, Any]] = None,
1658+
padding: Optional["Padding"] = None,
1659+
sequential: Optional[bool] = None,
1660+
truncation: Optional[bool] = None,
16581661
) -> TableQuestionAnsweringOutputElement:
16591662
"""
16601663
Retrieve the answer to a question from information given in a table.
@@ -1668,8 +1671,14 @@ def table_question_answering(
16681671
model (`str`):
16691672
The model to use for the table-question-answering task. Can be a model ID hosted on the Hugging Face
16701673
Hub or a URL to a deployed Inference Endpoint.
1671-
parameters (`Dict[str, Any]`, *optional*):
1672-
Additional inference parameters. Defaults to None.
1674+
padding (`"Padding"`, *optional*):
1675+
Activates and controls padding.
1676+
sequential (`bool`, *optional*):
1677+
Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the
1678+
inference to be done sequentially to extract relations within sequences, given their conversational
1679+
nature.
1680+
truncation (`bool`, *optional*):
1681+
Activates and controls truncation.
16731682
16741683
Returns:
16751684
[`TableQuestionAnsweringOutputElement`]: a table question answering output containing the answer, coordinates, cells and the aggregator used.
@@ -1690,6 +1699,11 @@ def table_question_answering(
16901699
TableQuestionAnsweringOutputElement(answer='36542', coordinates=[[0, 1]], cells=['36542'], aggregator='AVERAGE')
16911700
```
16921701
"""
1702+
parameters = {
1703+
"padding": padding,
1704+
"sequential": sequential,
1705+
"truncation": truncation,
1706+
}
16931707
inputs = {
16941708
"query": query,
16951709
"table": table,

src/huggingface_hub/inference/_generated/_async_client.py

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,7 @@
7070
ImageToImageTargetSize,
7171
ImageToTextOutput,
7272
ObjectDetectionOutputElement,
73+
Padding,
7374
QuestionAnsweringOutputElement,
7475
SummarizationOutput,
7576
SummarizationTruncationStrategy,
@@ -1713,7 +1714,9 @@ async def table_question_answering(
17131714
query: str,
17141715
*,
17151716
model: Optional[str] = None,
1716-
parameters: Optional[Dict[str, Any]] = None,
1717+
padding: Optional["Padding"] = None,
1718+
sequential: Optional[bool] = None,
1719+
truncation: Optional[bool] = None,
17171720
) -> TableQuestionAnsweringOutputElement:
17181721
"""
17191722
Retrieve the answer to a question from information given in a table.
@@ -1727,8 +1730,14 @@ async def table_question_answering(
17271730
model (`str`):
17281731
The model to use for the table-question-answering task. Can be a model ID hosted on the Hugging Face
17291732
Hub or a URL to a deployed Inference Endpoint.
1730-
parameters (`Dict[str, Any]`, *optional*):
1731-
Additional inference parameters. Defaults to None.
1733+
padding (`"Padding"`, *optional*):
1734+
Activates and controls padding.
1735+
sequential (`bool`, *optional*):
1736+
Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the
1737+
inference to be done sequentially to extract relations within sequences, given their conversational
1738+
nature.
1739+
truncation (`bool`, *optional*):
1740+
Activates and controls truncation.
17321741
17331742
Returns:
17341743
[`TableQuestionAnsweringOutputElement`]: a table question answering output containing the answer, coordinates, cells and the aggregator used.
@@ -1750,6 +1759,11 @@ async def table_question_answering(
17501759
TableQuestionAnsweringOutputElement(answer='36542', coordinates=[[0, 1]], cells=['36542'], aggregator='AVERAGE')
17511760
```
17521761
"""
1762+
parameters = {
1763+
"padding": padding,
1764+
"sequential": sequential,
1765+
"truncation": truncation,
1766+
}
17531767
inputs = {
17541768
"query": query,
17551769
"table": table,

src/huggingface_hub/inference/_generated/types/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -101,9 +101,11 @@
101101
SummarizationTruncationStrategy,
102102
)
103103
from .table_question_answering import (
104+
Padding,
104105
TableQuestionAnsweringInput,
105106
TableQuestionAnsweringInputData,
106107
TableQuestionAnsweringOutputElement,
108+
TableQuestionAnsweringParameters,
107109
)
108110
from .text2text_generation import (
109111
Text2TextGenerationInput,

src/huggingface_hub/inference/_generated/types/table_question_answering.py

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
55
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
66
from dataclasses import dataclass
7-
from typing import Any, Dict, List, Optional
7+
from typing import Dict, List, Literal, Optional
88

99
from .base import BaseInferenceType
1010

@@ -19,13 +19,31 @@ class TableQuestionAnsweringInputData(BaseInferenceType):
1919
"""The table to serve as context for the questions"""
2020

2121

22+
Padding = Literal["do_not_pad", "longest", "max_length"]
23+
24+
25+
@dataclass
26+
class TableQuestionAnsweringParameters(BaseInferenceType):
27+
"""Additional inference parameters for Table Question Answering"""
28+
29+
padding: Optional["Padding"] = None
30+
"""Activates and controls padding."""
31+
sequential: Optional[bool] = None
32+
"""Whether to do inference sequentially or as a batch. Batching is faster, but models like
33+
SQA require the inference to be done sequentially to extract relations within sequences,
34+
given their conversational nature.
35+
"""
36+
truncation: Optional[bool] = None
37+
"""Activates and controls truncation."""
38+
39+
2240
@dataclass
2341
class TableQuestionAnsweringInput(BaseInferenceType):
2442
"""Inputs for Table Question Answering inference"""
2543

2644
inputs: TableQuestionAnsweringInputData
2745
"""One (table, question) pair to answer"""
28-
parameters: Optional[Dict[str, Any]] = None
46+
parameters: Optional[TableQuestionAnsweringParameters] = None
2947
"""Additional inference parameters for Table Question Answering"""
3048

3149

utils/check_task_parameters.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,6 @@
6868
"audio_to_audio",
6969
"feature_extraction",
7070
"sentence_similarity",
71-
"table_question_answering",
7271
"automatic_speech_recognition",
7372
"image_to_text",
7473
]

0 commit comments

Comments (0)