# Full fine-tuning (FFT) config for DeepSeek R1 Distill Llama 3.1 8B.
#
# Usage:
#   oumi train -c configs/recipes/deepseek_r1/sft/distill_llama_8b/full_train.yaml
#
# See Also:
#   - Documentation: https://oumi.ai/docs/en/latest/user_guides/train/train.html
#   - Config class: oumi.core.configs.TrainingConfig
#   - Config source: https://github.com/oumi-ai/oumi/blob/main/src/oumi/core/configs/training_config.py
#   - Other training configs: configs/**/pretraining/, configs/**/sft/, configs/**/dpo/

model:
  model_name: "deepseek-ai/DeepSeek-R1-Distill-Llama-8B"
  model_max_length: 8192
  torch_dtype_str: "bfloat16"
  attn_implementation: "sdpa"
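  # "sdpa" uses PyTorch's built-in scaled_dot_product_attention kernel.
  # If flash-attn is installed, "flash_attention_2" may be a faster
  # alternative (an assumption; benchmark on your hardware).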
  trust_remote_code: True
  # Improves training speed by 20% with default config.
  enable_liger_kernel: True

data:
  train:
    datasets:
      - dataset_name: "yahma/alpaca-cleaned"
    target_col: "prompt"
    use_async_dataset: True
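    # `datasets` is a list: additional entries added above are mixed into
    # the training split alongside alpaca-cleaned.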

training:
  trainer_type: "TRL_SFT"
  save_steps: 800
  num_train_epochs: 3
  per_device_train_batch_size: 2
  gradient_accumulation_steps: 1
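  # Effective global batch size = per_device_train_batch_size (2)
  # x gradient_accumulation_steps (1) x world size, e.g. 16 sequences
  # per optimizer step on a single 8-GPU node.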

  enable_gradient_checkpointing: True
  gradient_checkpointing_kwargs:
    use_reentrant: False
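  # use_reentrant: False selects PyTorch's non-reentrant checkpointing,
  # the implementation recommended upstream and generally safer with FSDP.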
  ddp_find_unused_parameters: False
  optimizer: "adamw_torch_fused"
  learning_rate: 2.0e-05
  lr_scheduler_type: "cosine"

  compile: False

  dataloader_num_workers: "auto"
  dataloader_prefetch_factor: 32
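  # prefetch_factor follows PyTorch DataLoader semantics: each worker
  # keeps up to 32 batches loaded ahead of the training loop.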

  logging_steps: 100
  log_model_summary: False
  empty_device_cache_steps: 50
  output_dir: "output/deepseek_r1_llama8b.fft"
  include_performance_metrics: True
  enable_wandb: True

fsdp:
  enable_fsdp: True
  sharding_strategy: "HYBRID_SHARD"
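  # HYBRID_SHARD shards parameters, gradients, and optimizer state across
  # GPUs within a node and replicates across nodes, reducing inter-node
  # traffic at the cost of extra memory versus FULL_SHARD.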
  forward_prefetch: True
  auto_wrap_policy: "TRANSFORMER_BASED_WRAP"
  transformer_layer_cls: "LlamaDecoderLayer"
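
# Multi-GPU launch (a sketch: assumes a single node and that the
# `oumi distributed` wrapper is available in your oumi version):
#   oumi distributed torchrun -m oumi train -c configs/recipes/deepseek_r1/sft/distill_llama_8b/full_train.yaml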