3 changes: 2 additions & 1 deletion .github/workflows/job_jax_layer_tests.yml
@@ -48,6 +48,8 @@ jobs:
INSTALL_DIR: ${{ github.workspace }}/install
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
USE_SYSTEM_CACHE: False # Using remote HuggingFace cache
HF_HUB_VERBOSITY: debug
TRANSFORMERS_VERBOSITY: debug
steps:
# checkout action cleans up the workspace and have to be the first step
- name: Fetch custom actions
@@ -75,7 +77,6 @@ jobs:
run: |
echo "HF_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HUGGINGFACE_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HF_HUB_OFFLINE=1" >> "$GITHUB_ENV"

- name: Install OpenVINO dependencies (mac)
if: runner.os == 'macOS'
3 changes: 2 additions & 1 deletion .github/workflows/job_jax_models_tests.yml
@@ -44,6 +44,8 @@ jobs:
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests
USE_SYSTEM_CACHE: False # Using remote HuggingFace cache
HF_HUB_VERBOSITY: debug
TRANSFORMERS_VERBOSITY: debug
steps:
# checkout action cleans up the workspace and have to be the first step
- name: Fetch custom actions
@@ -77,7 +79,6 @@ jobs:
run: |
echo "HF_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HUGGINGFACE_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HF_HUB_OFFLINE=1" >> "$GITHUB_ENV"

- name: Extract OpenVINO packages and tests
run: pigz -dc openvino_tests.tar.gz | tar -xf - -v
3 changes: 2 additions & 1 deletion .github/workflows/job_onnx_models_tests.yml
@@ -46,6 +46,8 @@ jobs:
ONNX_MODEL_ZOO_SHA: "5faef4c33eba0395177850e1e31c4a6a9e634c82"
USE_SYSTEM_CACHE: False # Using remote HuggingFace cache
LOGS_FOLDER: ${{ github.workspace }}/onnx_models_tests_logs
HF_HUB_VERBOSITY: debug
TRANSFORMERS_VERBOSITY: debug
if: ${{ github.event_name != 'merge_group' }}
steps:
# checkout action cleans up the workspace and have to be the first step
@@ -78,7 +80,6 @@ jobs:
run: |
echo "HF_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HUGGINGFACE_HUB_CACHE=$HF_HUB_CACHE" >> "$GITHUB_ENV"
echo "HF_HUB_OFFLINE=1" >> "$GITHUB_ENV"
echo "MODELS_SHARE_PATH=/mount/testdata$((GITHUB_RUN_NUMBER % NUMBER_OF_REPLICAS))" >> "$GITHUB_ENV"

# Issue 148922
3 changes: 2 additions & 1 deletion .github/workflows/job_pytorch_fx_layer_tests.yml
@@ -47,6 +47,8 @@ jobs:
INSTALL_DIR: ${{ github.workspace }}/install
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
USE_SYSTEM_CACHE: False # Using remote HuggingFace cache
HF_HUB_VERBOSITY: debug
TRANSFORMERS_VERBOSITY: debug
steps:
- name: Fetch setup_python and install wheels actions
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -74,7 +76,6 @@ jobs:
run: |
Add-Content -Path $env:GITHUB_ENV -Value "HF_HUB_CACHE=C:\\mount\\caches\\huggingface"
Add-Content -Path $env:GITHUB_ENV -Value "HUGGINGFACE_HUB_CACHE=C:\\mount\\caches\\huggingface"
Add-Content -Path $env:GITHUB_ENV -Value "HF_HUB_OFFLINE=1"

- name: Install OpenVINO dependencies (mac)
if: runner.os == 'macOS'
4 changes: 3 additions & 1 deletion .github/workflows/job_pytorch_layer_tests.yml
@@ -49,6 +49,8 @@ jobs:
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
LAYER_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/layer_tests
USE_SYSTEM_CACHE: False # Using remote HuggingFace cache
HF_HUB_VERBOSITY: debug
TRANSFORMERS_VERBOSITY: debug
steps:
- name: Fetch setup_python and install wheels actions
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -75,7 +77,7 @@ jobs:
run: |
echo "HF_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HUGGINGFACE_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HF_HUB_OFFLINE=1" >> "$GITHUB_ENV"

- name: Install OpenVINO dependencies (mac)
if: runner.os == 'macOS'
run: brew install pigz
5 changes: 2 additions & 3 deletions .github/workflows/job_pytorch_models_tests.yml
@@ -43,6 +43,8 @@ jobs:
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests
USE_SYSTEM_CACHE: False # Using remote HuggingFace cache
HF_HUB_VERBOSITY: debug
TRANSFORMERS_VERBOSITY: debug
steps:
- name: Fetch setup_python and install wheels actions
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -75,7 +77,6 @@ jobs:
run: |
echo "HF_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HUGGINGFACE_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HF_HUB_OFFLINE=1" >> "$GITHUB_ENV"

- name: Extract OpenVINO artifacts
run: pigz -dc openvino_tests.tar.gz | tar -xf - -v
@@ -119,7 +120,6 @@ jobs:
TYPE: ${{ inputs.model_scope == 'precommit' && 'precommit' || 'nightly' }}
TEST_DEVICE: CPU
OP_REPORT_FILE: ${{ env.INSTALL_TEST_DIR }}/TEST-torch_unsupported_ops.log
HF_HUB_OFFLINE: 0 # Can't use offline mode - these tests use `model_info` call, which can't be cached

- name: PagedAttention Test
if: ${{ inputs.model_scope == 'precommit' }}
@@ -128,7 +128,6 @@ jobs:
python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/transformation_tests/test_pa_transformation.py -m precommit --html=${INSTALL_TEST_DIR}/TEST-torch_pagedattention_tests.html --self-contained-html -vvv -s --tb=short -n 2
env:
TEST_DEVICE: CPU
HF_HUB_OFFLINE: 0

- name: RoPE Test
if: ${{ inputs.model_scope == 'precommit' }}
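
For context on the two `HF_HUB_OFFLINE: 0` overrides deleted above: `model_info` is a live Hub API call with no on-disk cache, so those steps could never run in offline mode, and once `HF_HUB_OFFLINE=1` is no longer exported globally the per-step overrides become unnecessary. A minimal sketch of the call in question, with `bert-base-uncased` as a stand-in model id:

```python
from huggingface_hub import HfApi

# model_info always performs a network request to the Hub API; unlike file
# downloads it is not served from the local cache, so HF_HUB_OFFLINE=1 breaks it.
info = HfApi().model_info("bert-base-uncased")  # stand-in model id
print(info.sha, info.pipeline_tag)
```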
3 changes: 2 additions & 1 deletion .github/workflows/job_tensorflow_layer_tests.yml
@@ -48,6 +48,8 @@ jobs:
INSTALL_DIR: ${{ github.workspace }}/install
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
USE_SYSTEM_CACHE: False # Using remote HuggingFace cache
HF_HUB_VERBOSITY: debug
TRANSFORMERS_VERBOSITY: debug
steps:
- name: Fetch custom actions
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -80,7 +82,6 @@ jobs:
run: |
echo "HF_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HUGGINGFACE_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HF_HUB_OFFLINE=1" >> "$GITHUB_ENV"

- name: Install OpenVINO dependencies (mac)
if: ${{ runner.os == 'macOS' }}
3 changes: 2 additions & 1 deletion .github/workflows/job_tensorflow_models_tests.yml
@@ -43,6 +43,8 @@ jobs:
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
NUMBER_OF_REPLICAS: 2
USE_SYSTEM_CACHE: False # Using remote HuggingFace cache
HF_HUB_VERBOSITY: debug
TRANSFORMERS_VERBOSITY: debug
steps:
- name: Fetch custom actions
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -76,7 +78,6 @@ jobs:
echo "HF_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "HUGGINGFACE_HUB_CACHE=${{ runner.os == 'Linux' && env.HF_HUB_CACHE_LIN || env.HF_HUB_CACHE_WIN }}" >> "$GITHUB_ENV"
echo "TFHUB_CACHE_DIR=/mount/testdata$((GITHUB_RUN_NUMBER % NUMBER_OF_REPLICAS))/tfhub_models" >> "$GITHUB_ENV"
echo "HF_HUB_OFFLINE=1" >> "$GITHUB_ENV"

- name: Extract OpenVINO artifacts (Linux and macOS)
run: pigz -dc openvino_tests.tar.gz | tar -xf - -v
4 changes: 3 additions & 1 deletion tests/e2e_tests/test_utils/pytorch_loaders.py
@@ -6,6 +6,7 @@
import sys

import torch
from huggingface_hub import snapshot_download


class LoadPyTorchModel:
@@ -70,7 +71,8 @@ def load_cadene_model(module, args):

def load_hugging_face_model(module, args):
module = importlib.import_module(module)
model = module.AutoModel.from_pretrained(args['model-name'], torchscript=True)
model_cached = snapshot_download(args['model-name']) # required to avoid HF rate limits
model = module.AutoModel.from_pretrained(model_cached, torchscript=True)

return model
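
Throughout this PR the same pattern is applied: the model repository is pre-fetched once with `snapshot_download`, and `from_pretrained` is then pointed at the returned local path so that later loads read from the on-disk cache instead of hitting the Hub again. A minimal sketch of that idea outside the diff, assuming `huggingface_hub` and `transformers` are installed; `bert-base-uncased` is just an illustrative model id:

```python
from huggingface_hub import snapshot_download
from transformers import AutoModel

# snapshot_download fetches (or reuses) the full repo snapshot in the local
# HF cache and returns the directory path of that snapshot.
local_dir = snapshot_download("bert-base-uncased")  # illustrative model id

# Loading from the local path avoids repeated Hub metadata requests,
# which is what was triggering the rate limits in CI.
model = AutoModel.from_pretrained(local_dir, torchscript=True)
```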

6 changes: 4 additions & 2 deletions tests/llm/accuracy_conformance.py
@@ -5,6 +5,7 @@
import tempfile

import pytest
from huggingface_hub import snapshot_download
from optimum.intel.openvino import (OVModelForCausalLM,
OVWeightQuantizationConfig)
from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
@@ -100,8 +101,9 @@ def setup_model(model_id):
logger.info(f"Setting up model: {model_id}")

# Download original model
model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model_cached = snapshot_download(model_id) # required to avoid HF rate limits
model = AutoModelForCausalLM.from_pretrained(model_cached)
tokenizer = AutoTokenizer.from_pretrained(model_cached)

# Save original model
model_path = get_model_path(model_id, "org")
10 changes: 6 additions & 4 deletions tests/model_hub_tests/jax/test_hf_transformers.py
@@ -5,6 +5,7 @@
import os
import pytest
import requests
from huggingface_hub import snapshot_download
from PIL import Image
from models_hub_common.constants import hf_cache_dir, clean_hf_cache_dir
from models_hub_common.utils import cleanup_dir, get_models_list, retry
@@ -22,17 +23,18 @@
class TestTransformersModel(TestJaxConvertModel):
@retry(3, exceptions=(OSError,), delay=1)
def load_model(self, model_name, _):
model = FlaxAutoModel.from_pretrained(model_name)
model_cached = snapshot_download(model_name) # required to avoid HF rate limits
model = FlaxAutoModel.from_pretrained(model_cached)
if model_name in ['google/vit-base-patch16-224-in21k']:
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
image_processor = AutoImageProcessor.from_pretrained(model_name)
image_processor = AutoImageProcessor.from_pretrained(model_cached)
self.example = image_processor(images=image, return_tensors="np")
elif model_name in ['albert/albert-base-v2', 'facebook/bart-base', 'ksmcg/Mistral-tiny']:
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_cached)
self.example = tokenizer("Hello, my dog is cute", return_tensors="np")
elif model_name in ['openai/clip-vit-base-patch32']:
processor = AutoProcessor.from_pretrained(model_name)
processor = AutoProcessor.from_pretrained(model_cached)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
self.example = processor(text=["a photo of a cat", "a photo of a dog"],
4 changes: 3 additions & 1 deletion tests/model_hub_tests/pytorch/test_edsr.py
@@ -6,6 +6,7 @@
import pytest
import random
import torch
from huggingface_hub import snapshot_download
from models_hub_common.constants import hf_cache_dir, clean_hf_cache_dir
from models_hub_common.utils import cleanup_dir

@@ -44,8 +45,9 @@ def load_model(self, model_name, model_link):
image = Image.open(requests.get(url, stream=True).raw)
assert model_name in name_to_class, "Unexpected model name"
print(f"scale: {self.scale}")
model_cached = snapshot_download(f'eugenesiow/{model_name}') # required to avoid HF rate limits
model = name_to_class[model_name].from_pretrained(
f'eugenesiow/{model_name}', scale=self.scale)
model_cached, scale=self.scale)
inputs = ImageLoader.load_image(image)
self.example = (torch.randn_like(inputs),)
self.inputs = (inputs,)