We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 42d4c12 commit 563aae0Copy full SHA for 563aae0
configs/llm_finetuning/smollm2_guanaco.yml
task: llm-sft
base_model: HuggingFaceTB/SmolLM2-135M-Instruct
project_name: autotrain-smollm2-135m-finetune-guanaco
log: tensorboard
backend: local

data:
  path: timdettmers/openassistant-guanaco
  train_split: train
  valid_split: null
  chat_template: null
  column_mapping:
    text_column: text

params:
  block_size: 1024
  model_max_length: 2048
  epochs: 1
  batch_size: 1
  lr: 1e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: paged_adamw_8bit
  scheduler: linear
  gradient_accumulation: 8
  mixed_precision: bf16
  merge_adapter: true

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
0 commit comments