|
118 | 118 | - local: tools |
119 | 119 | title: Tools |
120 | 120 | - local: transformers_as_backend |
121 | | - title: Inference server backends |
| 121 | + title: Transformers as modeling backend |
122 | 122 | - local: continuous_batching |
123 | 123 | title: Continuous Batching |
124 | 124 | title: Inference |
|
422 | 422 | title: BLOOM |
423 | 423 | - local: model_doc/blt |
424 | 424 | title: BLT |
425 | | - - local: model_doc/bort |
426 | | - title: BORT |
427 | 425 | - local: model_doc/byt5 |
428 | 426 | title: ByT5 |
429 | 427 | - local: model_doc/camembert |
|
478 | 476 | title: Ernie4_5 |
479 | 477 | - local: model_doc/ernie4_5_moe |
480 | 478 | title: Ernie4_5_MoE |
481 | | - - local: model_doc/ernie_m |
482 | | - title: ErnieM |
483 | 479 | - local: model_doc/esm |
484 | 480 | title: ESM |
485 | 481 | - local: model_doc/exaone4 |
|
534 | 530 | title: GPTBigCode |
535 | 531 | - local: model_doc/gpt_oss |
536 | 532 | title: GptOss |
537 | | - - local: model_doc/gptsan-japanese |
538 | | - title: GPTSAN Japanese |
539 | 533 | - local: model_doc/gpt-sw3 |
540 | 534 | title: GPTSw3 |
541 | 535 | - local: model_doc/granite |
|
560 | 554 | title: Jamba |
561 | 555 | - local: model_doc/jetmoe |
562 | 556 | title: JetMoe |
563 | | - - local: model_doc/jukebox |
564 | | - title: Jukebox |
565 | 557 | - local: model_doc/led |
566 | 558 | title: LED |
567 | 559 | - local: model_doc/lfm2 |
|
596 | 588 | title: MarkupLM |
597 | 589 | - local: model_doc/mbart |
598 | 590 | title: MBart and MBart-50 |
599 | | - - local: model_doc/mega |
600 | | - title: MEGA |
601 | 591 | - local: model_doc/megatron-bert |
602 | 592 | title: MegatronBERT |
603 | 593 | - local: model_doc/megatron_gpt2 |
|
632 | 622 | title: myt5 |
633 | 623 | - local: model_doc/nemotron |
634 | 624 | title: Nemotron |
635 | | - - local: model_doc/nezha |
636 | | - title: NEZHA |
637 | 625 | - local: model_doc/nllb |
638 | 626 | title: NLLB |
639 | 627 | - local: model_doc/nllb-moe |
|
648 | 636 | title: Olmo3 |
649 | 637 | - local: model_doc/olmoe |
650 | 638 | title: OLMoE |
651 | | - - local: model_doc/open-llama |
652 | | - title: Open-Llama |
653 | 639 | - local: model_doc/opt |
654 | 640 | title: OPT |
655 | 641 | - local: model_doc/pegasus |
|
670 | 656 | title: PLBart |
671 | 657 | - local: model_doc/prophetnet |
672 | 658 | title: ProphetNet |
673 | | - - local: model_doc/qdqbert |
674 | | - title: QDQBert |
675 | 659 | - local: model_doc/qwen2 |
676 | 660 | title: Qwen2 |
677 | 661 | - local: model_doc/qwen2_moe |
|
684 | 668 | title: Qwen3Next |
685 | 669 | - local: model_doc/rag |
686 | 670 | title: RAG |
687 | | - - local: model_doc/realm |
688 | | - title: REALM |
689 | 671 | - local: model_doc/recurrent_gemma |
690 | 672 | title: RecurrentGemma |
691 | 673 | - local: model_doc/reformer |
692 | 674 | title: Reformer |
693 | 675 | - local: model_doc/rembert |
694 | 676 | title: RemBERT |
695 | | - - local: model_doc/retribert |
696 | | - title: RetriBERT |
697 | 677 | - local: model_doc/roberta |
698 | 678 | title: RoBERTa |
699 | 679 | - local: model_doc/roberta-prelayernorm |
|
722 | 702 | title: T5Gemma |
723 | 703 | - local: model_doc/t5v1.1 |
724 | 704 | title: T5v1.1 |
725 | | - - local: model_doc/tapex |
726 | | - title: TAPEX |
727 | | - - local: model_doc/transfo-xl |
728 | | - title: Transformer XL |
729 | 705 | - local: model_doc/ul2 |
730 | 706 | title: UL2 |
731 | 707 | - local: model_doc/umt5 |
|
738 | 714 | title: XGLM |
739 | 715 | - local: model_doc/xlm |
740 | 716 | title: XLM |
741 | | - - local: model_doc/xlm-prophetnet |
742 | | - title: XLM-ProphetNet |
743 | 717 | - local: model_doc/xlm-roberta |
744 | 718 | title: XLM-RoBERTa |
745 | 719 | - local: model_doc/xlm-roberta-xl |
|
786 | 760 | title: Depth Anything V2 |
787 | 761 | - local: model_doc/depth_pro |
788 | 762 | title: DepthPro |
789 | | - - local: model_doc/deta |
790 | | - title: DETA |
791 | 763 | - local: model_doc/detr |
792 | 764 | title: DETR |
793 | 765 | - local: model_doc/dinat |
|
802 | 774 | title: DiT |
803 | 775 | - local: model_doc/dpt |
804 | 776 | title: DPT |
805 | | - - local: model_doc/efficientformer |
806 | | - title: EfficientFormer |
807 | 777 | - local: model_doc/efficientloftr |
808 | 778 | title: EfficientLoFTR |
809 | 779 | - local: model_doc/efficientnet |
|
840 | 810 | title: MobileViT |
841 | 811 | - local: model_doc/mobilevitv2 |
842 | 812 | title: MobileViTV2 |
843 | | - - local: model_doc/nat |
844 | | - title: NAT |
845 | 813 | - local: model_doc/poolformer |
846 | 814 | title: PoolFormer |
847 | 815 | - local: model_doc/prompt_depth_anything |
|
860 | 828 | title: RT-DETRv2 |
861 | 829 | - local: model_doc/sam2 |
862 | 830 | title: SAM2 |
| 831 | + - local: model_doc/sam3_tracker |
| 832 | + title: Sam3Tracker |
863 | 833 | - local: model_doc/segformer |
864 | 834 | title: SegFormer |
865 | 835 | - local: model_doc/seggpt |
|
888 | 858 | title: Timm Wrapper |
889 | 859 | - local: model_doc/upernet |
890 | 860 | title: UperNet |
891 | | - - local: model_doc/van |
892 | | - title: VAN |
893 | 861 | - local: model_doc/vit |
894 | 862 | title: Vision Transformer (ViT) |
895 | | - - local: model_doc/vit_hybrid |
896 | | - title: ViT Hybrid |
897 | 863 | - local: model_doc/vitdet |
898 | 864 | title: ViTDet |
899 | 865 | - local: model_doc/vit_mae |
|
932 | 898 | title: Hubert |
933 | 899 | - local: model_doc/kyutai_speech_to_text |
934 | 900 | title: Kyutai Speech-To-Text |
935 | | - - local: model_doc/mctct |
936 | | - title: MCTCT |
937 | 901 | - local: model_doc/mimi |
938 | 902 | title: Mimi |
939 | 903 | - local: model_doc/mms |
|
960 | 924 | title: SEW-D |
961 | 925 | - local: model_doc/speech_to_text |
962 | 926 | title: Speech2Text |
963 | | - - local: model_doc/speech_to_text_2 |
964 | | - title: Speech2Text2 |
965 | 927 | - local: model_doc/speecht5 |
966 | 928 | title: SpeechT5 |
967 | 929 | - local: model_doc/unispeech |
|
994 | 956 | - sections: |
995 | 957 | - local: model_doc/sam2_video |
996 | 958 | title: SAM2 Video |
| 959 | + - local: model_doc/sam3_tracker_video |
| 960 | + title: Sam3TrackerVideo |
997 | 961 | - local: model_doc/timesformer |
998 | 962 | title: TimeSformer |
999 | 963 | - local: model_doc/vjepa2 |
|
1068 | 1032 | title: Gemma3n |
1069 | 1033 | - local: model_doc/git |
1070 | 1034 | title: GIT |
| 1035 | + - local: model_doc/glm46v |
| 1036 | + title: Glm46V |
1071 | 1037 | - local: model_doc/glm4v |
1072 | 1038 | title: glm4v |
1073 | 1039 | - local: model_doc/glm4v_moe |
|
1172 | 1138 | title: Qwen3VL |
1173 | 1139 | - local: model_doc/qwen3_vl_moe |
1174 | 1140 | title: Qwen3VLMoe |
| 1141 | + - local: model_doc/sam3 |
| 1142 | + title: SAM3 |
| 1143 | + - local: model_doc/sam3_video |
| 1144 | + title: SAM3 Video |
1175 | 1145 | - local: model_doc/shieldgemma2 |
1176 | 1146 | title: ShieldGemma2 |
1177 | 1147 | - local: model_doc/siglip |
|
1188 | 1158 | title: TAPAS |
1189 | 1159 | - local: model_doc/trocr |
1190 | 1160 | title: TrOCR |
1191 | | - - local: model_doc/tvlt |
1192 | | - title: TVLT |
1193 | 1161 | - local: model_doc/tvp |
1194 | 1162 | title: TVP |
1195 | 1163 | - local: model_doc/udop |
|
1216 | 1184 | - sections: |
1217 | 1185 | - local: model_doc/decision_transformer |
1218 | 1186 | title: Decision Transformer |
1219 | | - - local: model_doc/trajectory_transformer |
1220 | | - title: Trajectory Transformer |
1221 | 1187 | title: Reinforcement learning models |
1222 | 1188 | - sections: |
1223 | 1189 | - local: model_doc/autoformer |
|
1233 | 1199 | - local: model_doc/timesfm |
1234 | 1200 | title: TimesFM |
1235 | 1201 | title: Time series models |
1236 | | - - sections: |
1237 | | - - local: model_doc/graphormer |
1238 | | - title: Graphormer |
1239 | | - title: Graph models |
1240 | 1202 | title: Models |
1241 | 1203 | - sections: |
1242 | 1204 | - local: internal/modeling_utils |
|