add Incremental Pre-training Guide (#235)

Author: xzw · 2024-05-07 23:13:13 +08:00 · committed by GitHub
commit 6371a5c703
5 changed files with 288 additions and 0 deletions


@@ -98,6 +98,7 @@
</table>
### 🎇 Recent Updates
- 【2024.5.7】[Incremental Pre-training Guide](xtuner_config/pt/README.md)
- 【2024.4.20】Released the [LLAMA3 fine-tuning guide](xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md) and open-sourced [aiwei based on LLaMA3_8b_instruct](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM-LLaMA3_8b_instruct_aiwei)
- 【2024.4.14】Added [Quick Start](docs/quick_start.md) and the step-by-step tutorial [BabyEmoLLM](Baby_EmoLLM.ipynb)
- 【2024.4.2】Uploaded [Old Mother Counselor](https://huggingface.co/brycewang2018/EmoLLM-mother/tree/main) to Hugging Face


@@ -101,6 +101,7 @@ The Model aims to fully understand and promote the mental health of individuals,
</table>
### Recent Updates
- [2024.5.7] [Incremental Pre-training Guide](xtuner_config/pt/README.md)
- [2024.4.20] Released the [LLAMA3 fine-tuning guide](xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md) and open-sourced [aiwei based on LLaMA3_8b_instruct](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM-LLaMA3_8b_instruct_aiwei)
- [2024.4.14] Added [Quick Start](docs/quick_start_EN.md) and the step-by-step tutorial [BabyEmoLLM](Baby_EmoLLM.ipynb)
- [2024.4.2] Uploaded the [Old Mother Counselor](https://huggingface.co/brycewang2018/EmoLLM-mother/tree/main) to Hugging Face


xtuner_config/pt/README.md Normal file
@@ -0,0 +1,36 @@
# Incremental Pre-training Tutorial

## Introduction to Incremental Pre-training
Incremental pre-training aims to strengthen a model's capabilities in a specific domain or task.

## Pre-training Workflow
- Step 1: Process the data
- Step 2: Prepare the config (full-parameter, LoRA, or QLoRA)
- Step 3: Launch training (single GPU or multi-GPU, optionally with DeepSpeed)
- Step 4: Merge the model weights
- Step 5: Test the model
- Step 6: Upload the model

## EmoLLM Incremental Pre-training Tutorial
The data is adapted from the fine-tuning [datasets](../../datasets).
- Step 1: Modify the file paths in `ft2pt.py`.
  Using [output2.json](../../datasets/processed/output2.json) as an example, running the script generates [pt.json](../../datasets/pt/pt.json) (a sketch of the resulting format is shown after this list).
- Step 2: Prepare the [config](./internlm2_chat_1_8b_qlora_e3_pt.py).
  Note that this config uses **Variable Length Attention**, which requires installing `flash_attn`:
  `MAX_JOBS=4 pip install flash-attn --no-build-isolation`
- Step 3: Train:
```
# On a single GPU
xtuner train internlm2_chat_1_8b_qlora_e3_pt.py --deepspeed deepspeed_zero2
# On multiple GPUs
(DIST) NPROC_PER_NODE=${GPU_NUM} xtuner train internlm2_chat_1_8b_qlora_e3_pt.py --deepspeed deepspeed_zero2
(SLURM) srun ${SRUN_ARGS} xtuner train internlm2_chat_1_8b_qlora_e3_pt.py --launcher slurm --deepspeed deepspeed_zero2
```
- For the remaining steps, refer to the [fine-tuning tutorial](../../xtuner_config/README.md).
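
For reference, here is a minimal sketch of the format change that `ft2pt.py` performs on a single record; the example values below are made up, and the real data comes from `output2.json`:

```python
# One record in the fine-tuning dataset (hypothetical example values):
finetune_record = {
    "conversation": [
        {"input": "最近总是失眠,我该怎么办?", "output": "可以先尝试规律作息……"}
    ]
}

# After running ft2pt.py, every turn becomes its own single-sample record:
# the question and answer are merged into one text stored in "output",
# and "input" is left empty, which is the format used for pre-training.
pretrain_record = {
    "conversation": [
        {
            "input": "",
            "output": "问题:最近总是失眠,我该怎么办?\n答案:可以先尝试规律作息……"
        }
    ]
}
```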

xtuner_config/pt/ft2pt.py Normal file

@@ -0,0 +1,48 @@
# Convert data from the fine-tuning format into the pre-training format.
import json


def convert(data_path: str, target_path: str):
    # Read the original fine-tuning JSON file.
    with open(data_path, 'rt', encoding='utf-8') as file:
        data = json.load(file)

    converted_data = []
    # Iterate over every conversation group in the original data.
    for conversation_group in data:
        # Iterate over every turn of the conversation.
        for dialog in conversation_group["conversation"]:
            # Each turn becomes its own single-sample conversation group.
            new_conversation_group = {
                "conversation": []
            }
            # Merge the original input and output into one output text;
            # the input field is left empty for pre-training.
            new_dialog = {
                "input": '',
                "output": f'问题:{dialog["input"]}\n答案:{dialog["output"]}',
            }
            new_conversation_group["conversation"].append(new_dialog)
            converted_data.append(new_conversation_group)

    # Serialize the converted data back to formatted JSON and write it out.
    updated_json = json.dumps(converted_data, indent=4, ensure_ascii=False)
    with open(target_path, 'wt', encoding='utf-8') as file:
        file.write(updated_json)


if __name__ == '__main__':
    convert(data_path='./output2.json', target_path='pt.json')
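
If you want to sanity-check the conversion before pointing it at the full dataset, a small smoke test along these lines should work (the sample record and file names below are made up for illustration):

```python
# smoke_test_ft2pt.py -- quick check of the conversion (hypothetical file names)
import json

from ft2pt import convert

# A tiny fine-tuning style sample (made-up content).
sample = [{"conversation": [{"input": "你好", "output": "你好,我在听。"}]}]

with open('sample_ft.json', 'wt', encoding='utf-8') as f:
    json.dump(sample, f, ensure_ascii=False)

convert(data_path='sample_ft.json', target_path='sample_pt.json')

with open('sample_pt.json', 'rt', encoding='utf-8') as f:
    converted = json.load(f)

# Expect a single conversation whose input is empty and whose output
# contains the merged 问题/答案 text.
print(json.dumps(converted, ensure_ascii=False, indent=2))
assert converted[0]["conversation"][0]["input"] == ""
```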


xtuner_config/pt/internlm2_chat_1_8b_qlora_e3_pt.py Normal file
@@ -0,0 +1,202 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from datasets import load_dataset
from mmengine.dataset import DefaultSampler
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from peft import LoraConfig
from torch.optim import AdamW
from transformers import (AutoModelForCausalLM, AutoTokenizer,
BitsAndBytesConfig)
from xtuner.dataset import process_hf_dataset
from xtuner.dataset.collate_fns import default_collate_fn
from xtuner.dataset.map_fns import alpaca_map_fn, template_map_fn_factory
from xtuner.engine.hooks import (DatasetInfoHook, EvaluateChatHook,
VarlenAttnArgsToMessageHubHook)
from xtuner.engine.runner import TrainLoop
from xtuner.model import SupervisedFinetune
from xtuner.parallel.sequence import SequenceParallelSampler
from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
#######################################################################
# PART 1 Settings #
#######################################################################
# Model
pretrained_model_name_or_path = '/root/share/new_models/Shanghai_AI_Laboratory/internlm2-chat-1_8b'
use_varlen_attn = True  # enable Variable Length Attention (requires flash_attn)
# Data
data_path = '/root/wxz/work/pt/pt.json'  # pt.json generated by ft2pt.py
prompt_template = PROMPT_TEMPLATE.internlm2_chat
max_length = 2048
pack_to_max_length = True
# parallel
sequence_parallel_size = 1
# Scheduler & Optimizer
batch_size = 1 # per_device
accumulative_counts = 1
accumulative_counts *= sequence_parallel_size
dataloader_num_workers = 0
max_epochs = 3
optim_type = AdamW
lr = 2e-4
betas = (0.9, 0.999)
weight_decay = 0
max_norm = 1 # grad clip
warmup_ratio = 0.03
# Save
save_steps = 500
save_total_limit = 2 # Maximum checkpoints to keep (-1 means unlimited)
# Evaluate the generation performance during the training
# (note: EvaluateChatHook is not registered in custom_hooks below, so these settings are currently unused)
evaluation_freq = 500
SYSTEM = SYSTEM_TEMPLATE.alpaca
evaluation_inputs = [
'我们经常因为一些小事争吵,他总是忽略我的感受。我感到很孤独,'
]
#######################################################################
# PART 2 Model & Tokenizer #
#######################################################################
tokenizer = dict(
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
padding_side='right')
model = dict(
type=SupervisedFinetune,
use_varlen_attn=use_varlen_attn,
llm=dict(
type=AutoModelForCausalLM.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
torch_dtype=torch.float16,),
lora=dict(
type=LoraConfig,
r=64,
lora_alpha=16,
lora_dropout=0.1,
bias='none',
task_type='CAUSAL_LM'))
#######################################################################
# PART 3 Dataset & Dataloader #
#######################################################################
alpaca_en = dict(
type=process_hf_dataset,
dataset=dict(type=load_dataset, path='json', data_files=dict(train=data_path)),
tokenizer=tokenizer,
max_length=max_length,
dataset_map_fn=None,
template_map_fn=None,
remove_unused_columns=True,
shuffle_before_pack=True,
pack_to_max_length=pack_to_max_length,
use_varlen_attn=use_varlen_attn)
sampler = SequenceParallelSampler \
if sequence_parallel_size > 1 else DefaultSampler
train_dataloader = dict(
batch_size=batch_size,
num_workers=dataloader_num_workers,
dataset=alpaca_en,
sampler=dict(type=sampler, shuffle=True),
collate_fn=dict(type=default_collate_fn, use_varlen_attn=use_varlen_attn))
#######################################################################
# PART 4 Scheduler & Optimizer #
#######################################################################
# optimizer
optim_wrapper = dict(
type=AmpOptimWrapper,
optimizer=dict(
type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
accumulative_counts=accumulative_counts,
loss_scale='dynamic',
dtype='float16')
# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
param_scheduler = [
dict(
type=LinearLR,
start_factor=1e-5,
by_epoch=True,
begin=0,
end=warmup_ratio * max_epochs,
convert_to_iter_based=True),
dict(
type=CosineAnnealingLR,
eta_min=0.0,
by_epoch=True,
begin=warmup_ratio * max_epochs,
end=max_epochs,
convert_to_iter_based=True)
]
# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)
#######################################################################
# PART 5 Runtime #
#######################################################################
# custom hooks (DatasetInfoHook prints a few processed samples; the varlen-attn hook is appended below when enabled)
custom_hooks = [
dict(type=DatasetInfoHook, tokenizer=tokenizer),
]
if use_varlen_attn:
    custom_hooks += [dict(type=VarlenAttnArgsToMessageHubHook)]
# configure default hooks
default_hooks = dict(
# record the time of every iteration.
timer=dict(type=IterTimerHook),
# print log every 10 iterations.
logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
# enable the parameter scheduler.
param_scheduler=dict(type=ParamSchedulerHook),
# save checkpoint per `save_steps`.
checkpoint=dict(
type=CheckpointHook,
by_epoch=False,
interval=save_steps,
max_keep_ckpts=save_total_limit),
# set sampler seed in distributed environment.
sampler_seed=dict(type=DistSamplerSeedHook),
)
# configure environment
env_cfg = dict(
# whether to enable cudnn benchmark
cudnn_benchmark=False,
# set multi process parameters
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
# set distributed parameters
dist_cfg=dict(backend='nccl'),
)
# set visualizer
visualizer = None
# set log level
log_level = 'INFO'
# load from which checkpoint
load_from = None
# whether to resume training from the loaded checkpoint
resume = False
# Defaults to use random seed and disable `deterministic`
randomness = dict(seed=None, deterministic=False)
# set log processor
log_processor = dict(by_epoch=False)
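
Before launching training, it can be worth taking a rough look at the token lengths of the converted samples relative to `max_length = 2048`. A sketch, assuming `pt.json` sits at the `data_path` configured above and the tokenizer path is reachable:

```python
# check_lengths.py -- rough token-length statistics for pt.json (a sketch)
import json

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    '/root/share/new_models/Shanghai_AI_Laboratory/internlm2-chat-1_8b',
    trust_remote_code=True)

with open('/root/wxz/work/pt/pt.json', 'rt', encoding='utf-8') as f:
    data = json.load(f)

# Token length of every converted sample's output text.
lengths = [
    len(tokenizer(turn['output'])['input_ids'])
    for group in data
    for turn in group['conversation']
]

print(f'samples: {len(lengths)}')
print(f'longest sample: {max(lengths)} tokens')
print(f'samples over 2048 tokens: {sum(l > 2048 for l in lengths)}')
```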