3 changes: 3 additions & 0 deletions .gitignore
@@ -127,3 +127,6 @@ dmypy.json

# Pyre type checker
.pyre/

# Claude Code
.claude/*
1,560 changes: 1,560 additions & 0 deletions poetry.lock

Large diffs are not rendered by default.

89 changes: 89 additions & 0 deletions pyproject.toml
@@ -0,0 +1,89 @@
[tool.poetry]
name = "dine-domain-adaptation"
version = "0.1.0"
description = "DINE: Domain Adaptation with Debiased Representation Learning"
authors = ["Your Name <your.email@example.com>"]
readme = "README.md"
packages = [{include = "*.py", from = "."}]

[tool.poetry.dependencies]
python = "^3.8"
torch = "^2.0.0"
torchvision = "^0.15.0"
numpy = "^1.21.0"
scipy = "^1.7.0"
scikit-learn = "^1.0.0"
tqdm = "^4.62.0"

[tool.poetry.group.dev.dependencies]
pytest = "^7.4.0"
pytest-cov = "^4.1.0"
pytest-mock = "^3.11.0"

[tool.poetry.scripts]
test = "pytest:main"
tests = "pytest:main"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.pytest.ini_options]
minversion = "6.0"
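# The options below enforce strict marker/config validation and emit terminal, HTML, and XML coverage reports on every run.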
addopts = [
"-ra",
"-q",
"--strict-markers",
"--strict-config",
"--cov=.",
"--cov-report=term-missing",
"--cov-report=html:htmlcov",
"--cov-report=xml",
]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
markers = [
"unit: marks tests as unit tests (deselect with '-m \"not unit\"')",
"integration: marks tests as integration tests (deselect with '-m \"not integration\"')",
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
]

[tool.coverage.run]
source = ["."]
omit = [
"tests/*",
"*/tests/*",
"test_*",
"*_test.py",
"*/site-packages/*",
"*/.venv/*",
"*/venv/*",
"setup.py",
"conftest.py",
]
branch = true

[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"def __repr__",
"if self.debug:",
"if settings.DEBUG",
"raise AssertionError",
"raise NotImplementedError",
"if 0:",
"if __name__ == .__main__.:",
"class .*\\bProtocol\\):",
"@(abc\\.)?abstractmethod",
]
ignore_errors = true
show_missing = true
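# Fail the run outright if total coverage drops below 80%.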
fail_under = 80

[tool.coverage.html]
directory = "htmlcov"

[tool.coverage.xml]
output = "coverage.xml"
Empty file added tests/__init__.py
Empty file.
158 changes: 158 additions & 0 deletions tests/conftest.py
@@ -0,0 +1,158 @@
"""
Shared pytest fixtures for the DINE project test suite.

This module provides common fixtures and test utilities that can be used
across all test modules in the project.
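
Example of consuming a fixture in a test (a minimal sketch):

    def test_uses_sample_config(sample_config):
        assert sample_config['num_classes'] == 65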
"""

import tempfile
import shutil
from pathlib import Path
from unittest.mock import Mock
import pytest
import torch
import numpy as np


@pytest.fixture
def temp_dir():
"""Create a temporary directory for test files."""
temp_dir = tempfile.mkdtemp()
yield Path(temp_dir)
shutil.rmtree(temp_dir, ignore_errors=True)


@pytest.fixture
def sample_config():
"""Provide a sample configuration dictionary for testing."""
return {
'batch_size': 32,
'learning_rate': 0.001,
'epochs': 10,
'resize_size': 256,
'crop_size': 224,
'num_classes': 65,
'dataset': 'office-home'
}


@pytest.fixture
def mock_args():
"""Mock command line arguments for testing."""
args = Mock()
args.dset = 'office-home'
args.s_dset_path = './data/office-home/Art_list.txt'
args.t_dset_path = './data/office-home/Clipart_list.txt'
args.output_dir = './output'
args.gpu_id = '0'
args.net = 'resnet50'
args.seed = 2020
args.max_epoch = 50
args.batch_size = 36
args.lr = 1e-2
args.alpha = 0.3
args.bottleneck_dim = 256
args.epsilon = 1e-5
args.layer = 'wn'
args.classifier = 'bn'
args.distance = 'cosine'
args.threshold = 10
args.smooth = 0.1
return args


@pytest.fixture
def sample_tensor():
"""Provide a sample tensor for testing."""
return torch.randn(10, 3, 224, 224)


@pytest.fixture
def sample_labels():
"""Provide sample labels for testing."""
return torch.randint(0, 65, (10,))


@pytest.fixture
def mock_dataloader():
"""Mock PyTorch DataLoader for testing."""
mock_loader = Mock()
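# unittest.mock allows magic methods such as __iter__/__len__ to be assigned directly on a Mock.
# Note that the iterator below is single-use, so each test can loop over this loader only once.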
mock_loader.__iter__ = Mock(return_value=iter([
(torch.randn(4, 3, 224, 224), torch.randint(0, 65, (4,)))
for _ in range(3)
]))
mock_loader.__len__ = Mock(return_value=3)
return mock_loader


@pytest.fixture
def mock_model():
"""Mock PyTorch model for testing."""
model = Mock()
model.eval = Mock()
model.train = Mock()
model.parameters = Mock(return_value=[torch.randn(10, 10, requires_grad=True)])
model.state_dict = Mock(return_value={'layer.weight': torch.randn(10, 10)})
model.load_state_dict = Mock()
return model


@pytest.fixture
def sample_image_list_file(temp_dir):
"""Create a sample image list file for testing."""
image_list_file = temp_dir / "sample_list.txt"
with open(image_list_file, 'w') as f:
f.write("image1.jpg 0\n")
f.write("image2.jpg 1\n")
f.write("image3.jpg 2\n")
return image_list_file


@pytest.fixture
def mock_numpy_array():
"""Provide a mock numpy array for testing."""
return np.random.randn(100, 256)


@pytest.fixture
def setup_test_environment(temp_dir, monkeypatch):
"""Setup test environment with temporary directories and environment variables."""
# Create necessary directories
data_dir = temp_dir / "data"
output_dir = temp_dir / "output"
data_dir.mkdir()
output_dir.mkdir()

# Set environment variables
monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "0")

return {
'data_dir': data_dir,
'output_dir': output_dir,
'temp_dir': temp_dir
}


@pytest.fixture(autouse=True)
def set_deterministic_behavior():
"""Set deterministic behavior for reproducible tests."""
torch.manual_seed(42)
np.random.seed(42)
if torch.cuda.is_available():
torch.cuda.manual_seed(42)
torch.cuda.manual_seed_all(42)

# Ensure deterministic behavior
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False


@pytest.fixture
def mock_optimizer():
"""Mock PyTorch optimizer for testing."""
optimizer = Mock()
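# 'lr0' stores the initial learning rate, a convention the training code's LR scheduler is assumed to read.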
optimizer.param_groups = [{'lr': 0.01, 'lr0': 0.01}]
optimizer.zero_grad = Mock()
optimizer.step = Mock()
return optimizer
Empty file added tests/integration/__init__.py
Empty file.
115 changes: 115 additions & 0 deletions tests/test_infrastructure.py
@@ -0,0 +1,115 @@
"""
Infrastructure validation tests.

These tests verify that the testing infrastructure is properly configured
and all necessary dependencies are available.
"""

import pytest
import torch
import numpy as np
import sklearn
import scipy
from pathlib import Path


class TestInfrastructure:
"""Test the testing infrastructure setup."""

def test_pytorch_available(self):
"""Test that PyTorch is available and working."""
tensor = torch.tensor([1, 2, 3])
assert tensor.sum().item() == 6

def test_numpy_available(self):
"""Test that NumPy is available and working."""
array = np.array([1, 2, 3])
assert np.sum(array) == 6

def test_sklearn_available(self):
"""Test that scikit-learn is available."""
from sklearn.metrics import accuracy_score
y_true = [1, 1, 0, 0]
y_pred = [1, 1, 0, 0]
assert accuracy_score(y_true, y_pred) == 1.0

def test_scipy_available(self):
"""Test that SciPy is available."""
from scipy.spatial.distance import cdist

x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
distances = cdist(x, y)
assert distances.shape == (2, 2)

@pytest.mark.unit
def test_unit_marker(self):
"""Test that unit test marker works."""
assert True

@pytest.mark.integration
def test_integration_marker(self):
"""Test that integration test marker works."""
assert True

@pytest.mark.slow
def test_slow_marker(self):
"""Test that slow test marker works."""
assert True

def test_fixtures_available(self, temp_dir, sample_config, mock_args):
"""Test that custom fixtures are available and working."""
# Test temp_dir fixture
assert temp_dir.exists()
assert temp_dir.is_dir()

# Test sample_config fixture
assert isinstance(sample_config, dict)
assert 'batch_size' in sample_config

# Test mock_args fixture
assert hasattr(mock_args, 'dset')
assert mock_args.dset == 'office-home'

def test_project_structure(self):
"""Test that the project has the expected structure."""
project_root = Path(__file__).parent.parent

# Check main Python files exist
assert (project_root / "DINE_dist.py").exists()
assert (project_root / "DINE_ft.py").exists()
assert (project_root / "network.py").exists()
assert (project_root / "loss.py").exists()
assert (project_root / "data_list.py").exists()

# Check pyproject.toml exists
assert (project_root / "pyproject.toml").exists()

# Check test structure
tests_dir = project_root / "tests"
assert tests_dir.exists()
assert (tests_dir / "__init__.py").exists()
assert (tests_dir / "conftest.py").exists()
assert (tests_dir / "unit" / "__init__.py").exists()
assert (tests_dir / "integration" / "__init__.py").exists()

def test_deterministic_behavior(self):
"""Test that deterministic behavior is set for reproducible tests."""
# Test torch deterministic behavior
torch.manual_seed(42)
tensor1 = torch.randn(5)

torch.manual_seed(42)
tensor2 = torch.randn(5)

assert torch.allclose(tensor1, tensor2)

# Test numpy deterministic behavior
np.random.seed(42)
array1 = np.random.randn(5)

np.random.seed(42)
array2 = np.random.randn(5)

assert np.allclose(array1, array2)
Empty file added tests/unit/__init__.py
Empty file.