Skip to content

Commit 5d58b51

Browse files
Release 0.18.0.rc0 (#2849)

* [WIP] Release 0.18.0
  - Bump version
  - Deprecation in CPT
* Change version to 0.18.0.rc0
* More small fixes:
  - find a BOFT config that works with vision models
  - remove warning import in cpt config
  - make style
* Fix CPT tests and example
  - now mandatory to pass task_type as CAUSAL_LM
  - notebook: logging_dir argument raised an error
1 parent 41091ec commit 5d58b51

File tree

8 files changed

+25
-21
lines changed

8 files changed

+25
-21
lines changed

examples/cpt_finetuning/cpt_train_and_inference.ipynb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -159,7 +159,7 @@
159159
" TrainingArguments,\n",
160160
")\n",
161161
"\n",
162-
"from peft import CPTConfig, get_peft_model\n",
162+
"from peft import CPTConfig, TaskType, get_peft_model\n",
163163
"\n",
164164
"\n",
165165
"MAX_INPUT_LENGTH = 1024\n",
@@ -559,6 +559,7 @@
559559
"\n",
560560
"# Initialize the CPT configuration\n",
561561
"config = CPTConfig(\n",
562+
" task_type=TaskType.CAUSAL_LM,\n",
562563
" cpt_token_ids=context_ids,\n",
563564
" cpt_mask=context_attention_mask,\n",
564565
" cpt_tokens_type_mask=context_input_type_mask,\n",
@@ -761,7 +762,6 @@
761762
" num_train_epochs=5,\n",
762763
" fp16=True,\n",
763764
" save_strategy='no',\n",
764-
" logging_dir=\"logs\",\n",
765765
" report_to=\"none\"\n",
766766
")\n",
767767
"\n",

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
from setuptools import find_packages, setup
1616

1717

18-
VERSION = "0.17.2.dev0"
18+
VERSION = "0.18.0.rc0"
1919

2020
extras = {}
2121
extras["quality"] = [

src/peft/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
__version__ = "0.17.2.dev0"
15+
__version__ = "0.18.0.rc0"
1616

1717
from .auto import (
1818
MODEL_TYPE_TO_PEFT_MODEL_MAPPING,

src/peft/tuners/cpt/config.py

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
import warnings
1615
from dataclasses import dataclass, field
1716
from typing import Literal, Optional
1817

@@ -81,13 +80,7 @@ def __post_init__(self):
8180
self.num_transformer_submodules = 1 # Number of transformer submodules used.
8281
self.peft_type = PeftType.CPT # Specifies that the PEFT type is CPT.
8382
if self.task_type != TaskType.CAUSAL_LM:
84-
# TODO: adjust this to raise an error with PEFT v0.18.0
85-
warnings.warn(
86-
f"{self.__class__.__name__} only supports task_type = {TaskType.CAUSAL_LM.value}, "
87-
"setting it automatically. This will raise an error starting from PEFT v0.18.0.",
88-
FutureWarning,
89-
)
90-
self.task_type = TaskType.CAUSAL_LM # Ensures task type is causal language modeling.
83+
raise ValueError(f"{self.__class__.__name__} only supports task_type = {TaskType.CAUSAL_LM.value}.")
9184

9285
if self.cpt_token_ids is None:
9386
self.cpt_token_ids = [0]

tests/test_config.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
BOFTConfig,
2727
BoneConfig,
2828
C3AConfig,
29+
CPTConfig,
2930
FourierFTConfig,
3031
HRAConfig,
3132
IA3Config,
@@ -136,6 +137,8 @@ def test_from_peft_type(self):
136137

137138
if expected_cls == AdaLoraConfig:
138139
mandatory_config_kwargs = {"total_step": 1}
140+
elif expected_cls == CPTConfig:
141+
mandatory_config_kwargs = {"task_type": TaskType.CAUSAL_LM}
139142

140143
config = PeftConfig.from_peft_type(peft_type=peft_type, **mandatory_config_kwargs)
141144
assert type(config) is expected_cls

tests/test_cpt.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@ def config_text():
5555
opt_projection_epsilon=0.2,
5656
opt_projection_format_epsilon=0.1,
5757
tokenizer_name_or_path=MODEL_NAME,
58+
task_type=TaskType.CAUSAL_LM,
5859
)
5960
return config
6061

@@ -68,6 +69,7 @@ def config_random():
6869
opt_projection_epsilon=0.2,
6970
opt_projection_format_epsilon=0.1,
7071
tokenizer_name_or_path=MODEL_NAME,
72+
task_type=TaskType.CAUSAL_LM,
7173
)
7274
return config
7375

@@ -227,12 +229,14 @@ def test_model_initialization_random(global_tokenizer, config_random):
227229
assert model is not None, "PEFT model initialization failed"
228230

229231

230-
def test_model_initialization_wrong_task_type_warns():
231-
# TODO: adjust this test to check for an error with PEFT v0.18.0
232-
msg = "CPTConfig only supports task_type = CAUSAL_LM, setting it automatically"
233-
with pytest.warns(FutureWarning, match=msg):
234-
config = CPTConfig(task_type=TaskType.SEQ_CLS)
235-
assert config.task_type == TaskType.CAUSAL_LM
232+
def test_model_initialization_wrong_task_type_raises():
233+
msg = "CPTConfig only supports task_type = CAUSAL_LM."
234+
with pytest.raises(ValueError, match=msg):
235+
CPTConfig(task_type=TaskType.SEQ_CLS)
236+
237+
msg = "CPTConfig only supports task_type = CAUSAL_LM."
238+
with pytest.raises(ValueError, match=msg):
239+
CPTConfig()
236240

237241

238242
def test_model_training_random(sst_data, global_tokenizer, collator, config_random):

tests/test_decoder_models.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -735,6 +735,7 @@ def process(samples):
735735
(
736736
CPTConfig,
737737
{
738+
"task_type": "CAUSAL_LM",
738739
"cpt_token_ids": [0, 1, 2, 3, 4, 5, 6, 7], # Example token IDs for testing
739740
"cpt_mask": [1, 1, 1, 1, 1, 1, 1, 1],
740741
"cpt_tokens_type_mask": [1, 2, 2, 2, 3, 3, 4, 4],

tests/test_vision_models.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
)
3030

3131
from peft import (
32+
BOFTConfig,
3233
HRAConfig,
3334
LoHaConfig,
3435
LoKrConfig,
@@ -50,10 +51,12 @@
5051
r=1, oft_block_size=0, target_modules=["convolution"], modules_to_save=["classifier", "normalization"]
5152
),
5253
"hra": HRAConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
53-
# TODO: cannot use BOFT because some convolutional kernel dimensions are even (64) and others odd (147). There is no
54-
# common denominator for the boft_block_size except 1, but using 1 results in an error in the fbd_cuda kernel:
54+
# Cannot target multiple layers with BOFT because some convolutional kernel dimensions vary and there is no common
55+
# denominator for the boft_block_size except 1, but using 1 results in an error in the fbd_cuda kernel:
5556
# > Error in forward_fast_block_diag_cuda_kernel: an illegal memory access was encountered
56-
# "boft": BOFTConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"], boft_block_size=2),
57+
"boft": BOFTConfig(
58+
target_modules=["0.layer.0.convolution"], modules_to_save=["classifier", "normalization"], boft_block_size=2
59+
),
5760
}
5861

5962

0 commit comments

Comments (0)