# Copyright (c) OpenMMLab. All rights reserved.
import torch
from datasets import load_dataset
from mmengine.dataset import DefaultSampler
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from mmengine.visualization import (TensorboardVisBackend, Visualizer,
                                    WandbVisBackend)
from torch.optim import AdamW
from transformers import AutoModelForCausalLM, AutoTokenizer

from xtuner.dataset import ConcatDataset, process_hf_dataset
from xtuner.dataset.collate_fns import default_collate_fn
from xtuner.dataset.map_fns import template_map_fn_factory
from xtuner.engine.hooks import (DatasetInfoHook, EvaluateChatHook,
                                 VarlenAttnArgsToMessageHubHook)
from xtuner.engine.runner import TrainLoop
from xtuner.model import SupervisedFinetune
from xtuner.utils import PROMPT_TEMPLATE

#######################################################################
#                          PART 1  Settings                           #
#######################################################################
# Model
pretrained_model_name_or_path = '/root/share/model_repos/internlm2-chat-7b'
use_varlen_attn = False
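# Note: setting `use_varlen_attn = True` switches to variable-length (packed)
# attention; per the xtuner reference configs it is meant to be combined with
# `pack_to_max_length = True` and a flash-attention-capable model.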

# Data
data_path1 = './datasets/output.json'
data_path2 = './datasets/output2.json'
prompt_template = PROMPT_TEMPLATE.internlm2_chat
max_length = 4096
pack_to_max_length = False

# Scheduler & Optimizer
batch_size = 1  # per device
accumulative_counts = 4
dataloader_num_workers = 1
max_epochs = 5
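# Note: the effective batch size per optimizer step is
# batch_size * accumulative_counts per GPU, i.e. 1 * 4 = 4 here; if GPU
# memory is the constraint, raise `accumulative_counts` rather than
# `batch_size`.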
optim_type = AdamW
lr = 1e-6
betas = (0.9, 0.999)
weight_decay = 0.0001
max_norm = 1  # grad clip
warmup_ratio = 0.03

# Save
save_steps = 100
save_total_limit = 2  # maximum number of checkpoints to keep (-1 means unlimited)

# Evaluate generation quality periodically during training
evaluation_freq = 100
# System prompt (Chinese). English gloss: "You are the mental-health assistant
# EmoLLM, created by the EmoLLM team. Through professional psychological
# counseling you help visitors complete a psychological assessment; make full
# use of professional psychological knowledge and counseling techniques to
# help visitors solve their problems step by step."
SYSTEM = "你是心理健康助手EmoLLM,由EmoLLM团队打造。你旨在通过专业心理咨询,协助来访者完成心理诊断。请充分利用专业心理学知识与咨询技术,一步步帮助来访者解决心理问题。"
evaluation_inputs = [
    # English gloss: "Lately I have been feeling very anxious, especially about
    # my studies. There is a classmate I greatly admire who seems better than me
    # at everything; I feel I can never catch up however hard I try, and it
    # puts me under enormous pressure."
    '我最近总是感到很焦虑,尤其是在学业上。我有个特别崇拜的同学,他好像在各方面都比我优秀,我总觉得自己怎么努力也追不上他,这让我压力特别大。',
    # English gloss: "I know I should see this rationally, but I cannot stop
    # comparing myself to him. I even lose sleep at night thinking about how to
    # become as outstanding as he is."
    '我知道应该理性看待,但就是忍不住会去比较。我甚至晚上会因为这个睡不着觉,总想着怎样才能像他那样出色。'
]

#######################################################################
#                      PART 2  Model & Tokenizer                      #
#######################################################################
tokenizer = dict(
    type=AutoTokenizer.from_pretrained,
    pretrained_model_name_or_path=pretrained_model_name_or_path,
    trust_remote_code=True,
    padding_side='right')

model = dict(
    type=SupervisedFinetune,
    use_varlen_attn=use_varlen_attn,
    llm=dict(
        type=AutoModelForCausalLM.from_pretrained,
        pretrained_model_name_or_path=pretrained_model_name_or_path,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16))
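# Note: because neither `quantization_config` nor a `lora` entry is passed,
# SupervisedFinetune updates all weights (full-parameter fine-tuning) in
# bfloat16. For reference, a QLoRA variant of this config would look roughly
# like the sketch below (untested here; it would also need the
# `BitsAndBytesConfig` and `LoraConfig` imports from transformers and peft):
#
# model = dict(
#     type=SupervisedFinetune,
#     use_varlen_attn=use_varlen_attn,
#     llm=dict(
#         type=AutoModelForCausalLM.from_pretrained,
#         pretrained_model_name_or_path=pretrained_model_name_or_path,
#         trust_remote_code=True,
#         torch_dtype=torch.bfloat16,
#         quantization_config=dict(
#             type=BitsAndBytesConfig,
#             load_in_4bit=True,
#             bnb_4bit_compute_dtype=torch.bfloat16,
#             bnb_4bit_use_double_quant=True,
#             bnb_4bit_quant_type='nf4')),
#     lora=dict(
#         type=LoraConfig,
#         r=64,
#         lora_alpha=16,
#         lora_dropout=0.1,
#         bias='none',
#         task_type='CAUSAL_LM'))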

#######################################################################
#                    PART 3  Dataset & Dataloader                     #
#######################################################################
data1 = dict(
    type=process_hf_dataset,
    dataset=dict(
        type=load_dataset, path='json', data_files=dict(train=data_path1)),
    tokenizer=tokenizer,
    max_length=max_length,
    dataset_map_fn=None,
    template_map_fn=dict(
        type=template_map_fn_factory, template=prompt_template),
    remove_unused_columns=True,
    shuffle_before_pack=True,
    pack_to_max_length=pack_to_max_length,
    use_varlen_attn=use_varlen_attn)

data2 = dict(
    type=process_hf_dataset,
    dataset=dict(
        type=load_dataset, path='json', data_files=dict(train=data_path2)),
    tokenizer=tokenizer,
    max_length=max_length,
    dataset_map_fn=None,
    template_map_fn=dict(
        type=template_map_fn_factory, template=prompt_template),
    remove_unused_columns=True,
    shuffle_before_pack=True,
    pack_to_max_length=pack_to_max_length,
    use_varlen_attn=use_varlen_attn)

train_dataset = dict(type=ConcatDataset, datasets=[data1, data2])
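# Note: ConcatDataset simply joins the processed datasets end to end;
# additional corpora can be mixed in by defining more `process_hf_dataset`
# dicts and appending them to `datasets=[...]` above.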

train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=train_dataset,
    sampler=dict(type=DefaultSampler, shuffle=True),
    collate_fn=dict(type=default_collate_fn, use_varlen_attn=use_varlen_attn))

#######################################################################
#                    PART 4  Scheduler & Optimizer                    #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='bfloat16')
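# Note: AmpOptimWrapper runs the forward/backward pass under bfloat16
# autocast; gradients accumulate for `accumulative_counts` (4) micro-batches
# and are clipped to `max_norm` (1.0) before each optimizer step.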

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=warmup_ratio * max_epochs,
        convert_to_iter_based=True),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        begin=warmup_ratio * max_epochs,
        end=max_epochs,
        convert_to_iter_based=True)
]
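# Note: with warmup_ratio = 0.03 and max_epochs = 5, the LR warms up linearly
# from lr * 1e-5 to lr over the first 0.03 * 5 = 0.15 epochs, then follows a
# cosine decay to eta_min = 0.0 by the end of epoch 5;
# `convert_to_iter_based=True` applies both schedules per iteration.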

# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log sample dialogues periodically during training (optional)
custom_hooks = [
    dict(type=DatasetInfoHook, tokenizer=tokenizer),
    dict(
        type=EvaluateChatHook,
        tokenizer=tokenizer,
        every_n_iters=evaluation_freq,
        evaluation_inputs=evaluation_inputs,
        system=SYSTEM,
        prompt_template=prompt_template)
]
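# Note: every `evaluation_freq` (100) iterations, EvaluateChatHook prompts the
# current model with `evaluation_inputs` under the SYSTEM prompt and logs the
# generations, giving a quick qualitative check during training.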

if use_varlen_attn:
    custom_hooks += [dict(type=VarlenAttnArgsToMessageHubHook)]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print a log entry every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save a checkpoint every `save_steps` iterations.
    checkpoint=dict(
        type=CheckpointHook,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit),
    # set the sampler seed in the distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi-process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
visualizer = dict(
    type=Visualizer,
    vis_backends=[dict(type=WandbVisBackend)])
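# Note: mmengine visualizers accept several backends at once, so TensorBoard
# logging can be enabled with the already-imported backend, e.g.
# vis_backends=[dict(type=WandbVisBackend), dict(type=TensorboardVisBackend)].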

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = '/root/Emollm/work_dirs/internlm2_chat_7b_full/iter_7000.pth'

# whether to resume training from the loaded checkpoint
resume = True
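# Note: with `resume = True`, mmengine restores the optimizer, scheduler, and
# iteration state from `load_from` (continuing after iteration 7000); set
# `resume = False` to load only the model weights and start a fresh run.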

# defaults to a random seed with `deterministic` disabled
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)