11B_full.yaml
# Config for multi-device full finetuning in full_finetune_distributed.py
# using a Llama3.2 11B Vision Instruct model
#
# This config assumes that you've run the following command before launching:
#   tune download meta-llama/Llama-3.2-11B-Vision-Instruct --output-dir /tmp/Llama-3.2-11B-Vision-Instruct --ignore-patterns "original/consolidated*"
#
# To launch on 4 devices, run the following command from root:
#   tune run --nproc_per_node 4 full_finetune_distributed --config llama3_2_vision/11B_full
#
# You can add specific overrides through the command line. For example,
# to override the checkpointer directory while launching training:
#   tune run --nproc_per_node 4 full_finetune_distributed --config llama3_2_vision/11B_full checkpointer.checkpoint_dir=<YOUR_CHECKPOINT_DIR>
#
# This config works best when the model is being fine-tuned on 2+ GPUs.
# Single-device full finetuning needs additional memory optimizations;
# use 11B_full_single_device.yaml for that case.
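#
# Any other value in this file can be overridden with the same dotted key=value
# syntax, for example (illustrative values, not the defaults below):
#   tune run --nproc_per_node 4 full_finetune_distributed --config llama3_2_vision/11B_full batch_size=4 gradient_accumulation_steps=2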

# Model arguments
model:
  _component_: torchtune.models.llama3_2_vision.llama3_2_vision_11b
  decoder_trainable: False
  encoder_trainable: True
  fusion_trainable: True
  image_size: 560 # Make sure this matches the image_size in tokenizer
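# Note: with the flags above, only the vision encoder and the fusion layers (the
# layers that merge image features into the text decoder) receive gradient updates;
# the text decoder stays frozen. Setting decoder_trainable: True trains the full
# model at a correspondingly higher memory cost.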

# Transform
tokenizer:
  _component_: torchtune.models.llama3_2_vision.llama3_2_vision_transform
  path: /tmp/Llama-3.2-11B-Vision-Instruct/original/tokenizer.model
  image_size: 560
  max_seq_len: 8192

# Checkpointer
checkpointer:
  _component_: torchtune.training.FullModelHFCheckpointer
  checkpoint_dir: /tmp/Llama-3.2-11B-Vision-Instruct/
  checkpoint_files:
    filename_format: model-{}-of-{}.safetensors
    max_filename: "00005"
  recipe_checkpoint: null
  output_dir: /tmp/Llama-3.2-11B-Vision-Instruct/
  model_type: LLAMA3_VISION
resume_from_checkpoint: False
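# Note: filename_format plus max_filename should expand to the sharded HF checkpoint
# names of the downloaded model (model-00001-of-00005.safetensors through
# model-00005-of-00005.safetensors). Fine-tuned checkpoints are written to output_dir
# in the same format.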

# Dataset
dataset:
  packed: False # Set to true for great speed ups
  _component_: torchtune.datasets.multimodal.the_cauldron_dataset
  subset: ocrvqa
seed: null
shuffle: True
collate_fn: torchtune.data.padded_collate_tiled_images_and_mask
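# Note: padded_collate_tiled_images_and_mask is the multimodal collator; it pads the
# text sequences in a batch, stacks the tiled image tensors, and builds the
# cross-attention masks the vision model expects, which a text-only collate_fn would
# not provide.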

# Fine-tuning arguments
epochs: 1
max_steps_per_epoch: null
batch_size: 2
gradient_accumulation_steps: 4
optimizer:
  _component_: torch.optim.AdamW
  lr: 2e-5
  fused: True
optimizer_in_bwd: False # Set to True to use less memory. Requires gradient_accumulation_steps=1.
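# Note: with the 4-GPU launch command above, the effective batch size per optimizer
# step is batch_size * gradient_accumulation_steps * num_gpus = 2 * 4 * 4 = 32.
# optimizer_in_bwd applies the optimizer step during the backward pass so full
# gradients never need to be kept around, which is also why it cannot be combined
# with gradient accumulation.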
loss:
  _component_: torchtune.modules.loss.CEWithChunkedOutputLoss
clip_grad_norm: 1.0
compile: False # pytorch compile, set to true for perf/memory improvement
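# Note: CEWithChunkedOutputLoss computes the cross-entropy over chunks of the output
# logits rather than materializing the full [batch, seq_len, vocab_size] tensor at
# once, which lowers peak memory for the large Llama 3 vocabulary. compile: True
# enables torch.compile, trading longer startup for faster steps.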

# Training env
device: cuda

# Memory management
enable_activation_checkpointing: True
custom_sharded_layers: ['decoder.tok_embeddings']
dtype: bf16
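# Note: activation checkpointing recomputes activations during the backward pass
# instead of storing them all; custom_sharded_layers tells FSDP to shard the listed
# layers (here the decoder token embeddings) individually; bf16 roughly halves memory
# use versus fp32.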

# Logging
output_dir: /tmp/full-llama3.2-vision--finetune
metric_logger:
  _component_: torchtune.training.metric_logging.DiskLogger
  log_dir: /tmp/Llama-3.2-11B-Vision-Instruct/logs
log_every_n_steps: 1
log_peak_memory_stats: True
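# Note: DiskLogger writes metrics to plain-text files under log_dir; other loggers in
# torchtune.training.metric_logging (e.g. the Weights & Biases or TensorBoard loggers)
# can be swapped in via the _component_ field. log_peak_memory_stats adds CUDA
# peak-memory readings to each logged step.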