Merge pull request #80 from chg0901/main
update qwen, zhipuai gen_data and readme
Commit: 71c46bcdbf
config.yml
@@ -1,11 +1,11 @@
-aistudio _token : '{your_token}' # 文心一言的token
+aistudio_token : '{your_token}' # 文心一言的token
-dashscope_api_key : '{your_api_key}' #通义千问的api_key
+dashscope_api_key : '{your_api_key}' # 通义千问的api_key
-zhiouai_api_key : '{your_api_key}' # 智浦AI的密钥
+zhiouai_api_key : '{your_api_key}' # 智谱AI的密钥

 # 星火大模型配置
 appid : "{}" # 填写控制台中获取的 APPID 信息
 api_secret : "{}" # 填写控制台中获取的 APISecret 信息
 api_key : "{}" # 填写控制台中获取的 APIKey 信息

 system : '现在你是一个心理专家,我有一些心理问题,请你用专业的知识帮我解决'
generate_data/qwen_gen_data_NoBash.py (new file, 105 lines)
@@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-
import json
import random
import argparse
import yaml
import re
import os
import json

from tqdm import tqdm
import dashscope
from http import HTTPStatus

with open('config.yml', 'r', encoding='utf-8') as f:
    configs = yaml.load(f.read(), Loader=yaml.FullLoader)


def qwen_api(data, emo):
    dashscope.api_key = configs['dashscope_api_key']
    prompt = f'''你是一个研究过无数具有心理健康问题的病人与心理健康医生对话的专家,请你构造一些符合实际情况的具有心理健康问题的病人和心理健康医生的连续的多轮对话记录。
要求病人的问题属于{data}场景,具有{emo}情感,医生的回复尽可能包含心理辅导知识,并且能够一步步诱导病人说出自己的问题进而提供解决问题的可行方案。
注意,构造的数据必须以医生的陈述为结束语,每次只需要构造一个案例并且不需要写案例一、二等等,请返回完整的对话内容。
请以如下格式返回生成的数据:
病人:病人的咨询或陈述
医生:医生的安抚和建议
'''
    response = dashscope.Generation.call(
        model='qwen-max',
        prompt=prompt,
        history=[],
    )

    if response.status_code == HTTPStatus.OK:
        result = response.output.text
        print(result)
    else:
        result = 'ERROR'
    return result


def save_jsonl(data_lis, file_path):
    if not os.path.exists(os.path.dirname(file_path)):
        os.makedirs(os.path.dirname(file_path))
    with open(file_path, 'at', encoding='utf-8') as f:
        for item in data_lis:
            f.write(json.dumps(item, ensure_ascii=False) + '\n')


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='数据生成参数')

    parser.add_argument('--data', type=str, help='生活场景')

    emotions_lis = configs['emotions_list']
    areas_of_life = configs['areas_of_life']
    ai_tool = 'qwen'

    conversation_lis = []

    for emo in emotions_lis:
        for area in areas_of_life:
            gen_path = f'./{ai_tool}/{area}/{emo}.jsonl'

            for i in tqdm(range(100), desc='{emo}, {area}'.format(emo=emo, area=area)):
                one_conversation = {
                    "conversation": []
                }

                res = qwen_api(data=area, emo=emo)
                print(area, emo)

                # 一次会话
                doctor_pattern = r'医生:(.*?)(病人:|$)'

                doctor_matches = re.findall(doctor_pattern, res, re.DOTALL)
                doctor_conversations = [match[0] for match in doctor_matches]

                patient_pattern = r'病人:(.*?)医生:'
                patient_matches = re.findall(patient_pattern, res, re.DOTALL)
                patient_conversations = [match for match in patient_matches]

                for doc, pat in zip(doctor_conversations, patient_conversations):
                    if len(one_conversation['conversation']) == 0:
                        one_conversation['conversation'].append(
                            {
                                "system": "现在你是一个心理专家, 我有一些心理问题, 请你用专业的知识帮我解决。",
                                "input": pat,
                                "output": doc
                            },
                        )

                    else:
                        one_conversation['conversation'].append(
                            {
                                "input": pat,
                                "output": doc
                            },
                        )
                conversation_lis.append(one_conversation)

                # 每生成10条数据存储一次
                if ((i+1) % 10 == 0):
                    save_jsonl(data_lis=conversation_lis, file_path=gen_path)
                    print(f'generate {gen_path}')
                    conversation_lis = []  # 清空
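For clarity, here is a small, self-contained sketch (not part of the commit) of how the doctor/patient regular expressions above split a generated dialogue into input/output pairs. The sample dialogue string is invented for illustration; the patterns are the ones used in qwen_gen_data_NoBash.py.

```python
import re

# A toy dialogue in the format the prompt asks the model to return (invented sample).
res = "病人:最近工作压力很大,晚上睡不着。医生:听起来你承受了不少压力,可以先说说是什么让你焦虑吗?病人:主要是项目截止日期。医生:我们可以一起制定一个分步计划,并练习放松技巧来改善睡眠。"

# Same patterns as in the script above.
doctor_pattern = r'医生:(.*?)(病人:|$)'
patient_pattern = r'病人:(.*?)医生:'

doctor_turns = [m[0] for m in re.findall(doctor_pattern, res, re.DOTALL)]
patient_turns = re.findall(patient_pattern, res, re.DOTALL)

# zip() pairs each patient turn with the doctor's reply, mirroring the script's loop.
for pat, doc in zip(patient_turns, doctor_turns):
    print({"input": pat, "output": doc})
```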
requirements.txt
@@ -1,4 +1,5 @@
-erniebot #文心一言
+erniebot # 文心一言
 dashscope # 通义千问
-zhipuai # 智浦
+zhipuai # 智谱
-websocket #调用星火大模型的时候会使用
+python-dotenv # 智谱
+websocket # 调用星火大模型的时候会使用
数据生成教程 README(中文)
@@ -1,26 +1,26 @@
 # EmoLLM 微调数据生成教程

-**一、目标与背景**
+## **一、目标与背景**

 为了使我们的心理大模型有更好的表达效果,我们必须要有高质量的数据集。为了达到这一目标,我们决定利用四种强大的中文大模型:文心一言、通义千问、讯飞星火 和 智谱GLM 来生成对话数据。此外,我们还将增强数据集的认知深度,通过加入少量自我认知数据集来提高模型的泛化能力。

-**二、数据集生成方法**
+## **二、数据集生成方法**

 1. **模型选择与数据准备**

    选择文心一言、通义千问、讯飞星火和智谱GLM这四种大语言模型,获取调用相应接口的API,并准备用于生成对话数据。

-3. **单轮与多轮对话数据生成**
+2. **单轮与多轮对话数据生成**

    利用这四种模型,我们生成了10000条单轮和多轮对话数据。在这一过程中,我们确保了数据的多样性、复杂性和有效性。

    因为心理活动往往是复杂的,为了保证数据的多样性。我们选择了16 * 28 共 `448`个场景进行数据集生成,具体场景名称请参考config.yml中的 `emotions_list 和 areas_of_life`两个参数的配置。

-5. **自我认知数据集的加入**
+3. **自我认知数据集的加入**

    为了增强模型的认知能力,我们特意加入了一部分自我认知数据集。这些数据集有助于模型更好地理解上下文,提高对话的自然度和连贯性。

-**三、实践步骤**
+## **三、实践步骤**

 1. **初始化**
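The tutorial above states that scenes are the cross product of the `emotions_list` and `areas_of_life` parameters in config.yml (16 × 28 = 448 combinations in total). A minimal sketch (not from the commit) of how the generation scripts enumerate those scenes, following the nested loops in qwen_gen_data_NoBash.py and assuming the repo's config.yml is present in the working directory:

```python
import yaml

with open('config.yml', 'r', encoding='utf-8') as f:
    configs = yaml.load(f.read(), Loader=yaml.FullLoader)

emotions_lis = configs['emotions_list']
areas_of_life = configs['areas_of_life']

# Every (emotion, area) pair becomes one generation scene and one output file;
# with the full config this is 16 * 28 = 448 scenes.
scenes = [(emo, area) for emo in emotions_lis for area in areas_of_life]
print(len(scenes))

for emo, area in scenes[:3]:
    print(f'./qwen/{area}/{emo}.jsonl')  # per-scene output path, as in the script
```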
@@ -29,6 +29,7 @@
 ```bash
 pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```

 * 准备输入数据和配置参数

   可参见 `config.yml`均有注释
@@ -43,27 +44,37 @@
 3. **数据生成**

 * 使用通义千问大模型进行数据生成

   ```bash
   # 终端运行
   bash run_qwen.bash
+
+  # 或者不使用终端运行
+  python qwen_gen_data_NoBash.py
   ```

 * 使用百度文心大模型进行数据生成

   ```bash
   # 终端运行
   python ernie_gen_data.py
   ```

 * 使用智谱GLM大模型进行数据生成

   ```bash
   # 终端运行
   python zhipuai_gen_data.py
   ```

 * 使用讯飞星火大模型进行数据生成

   ```bash
   # 终端运行
   python ./xinghuo/gen_data.py
   ```

-4. **自我认知数据集的整合**
+1. **自我认知数据集的整合**

 * 自我认知数据集需要按照格式手动生成,如下格式即可

   ```json

@@ -91,7 +102,7 @@

 在进行数据集整合之前,我们要检查生成的数据是否存在格式错误,类型不符合等情况。我们需要check.py进行检查数据。最后再使用merge_json.py将所有的json整合为一个总的json文件。

-7. **评估与优化**
+6. **评估与优化**

 * 使用适当的评估指标对生成的数据集进行评估
 * 根据评估结果进行必要的优化和调整
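The integration step above says the generated files are validated with check.py and then combined into one JSON file with merge_json.py. Neither script appears in this diff, so the following is only an illustrative sketch of that merge step; the glob patterns and the output filename are assumed placeholders, not the repo's actual ones:

```python
import glob
import json

merged = []
# Assumed layout: one .jsonl file per (area, emotion) scene, as written by the generators.
for path in glob.glob('./qwen/*/*.jsonl') + glob.glob('./zhipuai/*/*.jsonl'):
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:
                merged.append(json.loads(line))

# Write one combined JSON array (placeholder output name).
with open('merged_data.json', 'w', encoding='utf-8') as f:
    json.dump(merged, f, ensure_ascii=False, indent=2)
```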
Data generation tutorial README (English)
@@ -1,10 +1,10 @@
 # EmoLLM fine-tuning data generation tutorial

-**I. Objectives and Background**
+## **I. Objectives and Background**

 In order to have a better representation of our large mental models, we must have high quality datasets. To achieve this goal, we decided to use four powerful AI grand models: **Wenxin Yiyan**, **Tongyi Qianwen**, **Feifei Spark**, and **Zhipu GLM** to generate conversation data. In addition, we will enhance the cognitive depth of the dataset and improve the generalization ability of the model by adding a small number of self-cognitive datasets.

-**II. dataset generation method**
+## **II. dataset generation method**

 1. **Model selection and data preparation**

@@ -20,7 +20,7 @@

 In order to enhance the cognitive ability of the model, we specially added a part of self-cognitive dataset. These datasets help the model better understand the context and improve the naturalness and coherence of the conversation.

-**III. Practical steps**
+## **III. Practical steps**

 1. **Initialize**

@@ -45,21 +45,31 @@
 3. **Data generation**

 * Data generation using Tongyi Qianwen

   ```bash
   # Terminal operation
   bash run_qwen.bash
+
+  # Or just use python without bash
+  python qwen_gen_data_NoBash.py
   ```

 * Data generation using Wenxin Yiyan

   ```bash
   # Terminal operation
   python ernie_gen_data.py
   ```

 * Data generation using Zhipu GLM

   ```bash
   # Terminal operation
   python zhipuai_gen_data.py
   ```

 * Data generation using IFlystar Fire

   ```bash
   # Terminal operation
   python ./xinghuo/gen_data.py
   ```

@@ -68,6 +78,7 @@
 4. **Integration of self-cognition datasets**

 * Self-cognition dataset this needs to be manually generated in accordance with the format, the following format can be

   ```json
   [
     {
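The sample JSON is cut off by the diff context here. Purely as an illustration, and not taken from the repo's self-cognition file, one entry in the system/input/output conversation format used by the generation scripts above might look like this (the question and answer text are invented):

```python
import json

# Invented illustration of a single self-cognition entry.
entry = {
    "conversation": [
        {
            "system": "现在你是一个心理专家, 我有一些心理问题, 请你用专业的知识帮我解决。",
            "input": "你是谁?",
            "output": "我是 EmoLLM,一个专注于心理健康陪伴的大模型助手。"
        }
    ]
}
print(json.dumps(entry, ensure_ascii=False, indent=2))
```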
@@ -103,4 +114,3 @@
 * Evaluate the trained model using an independent test set
 * Make necessary adjustments and optimizations based on test results
 * Deploy the final model into a real application
-*
zhipuai_gen_data.py
@@ -1,9 +1,11 @@
+# -*- coding: utf-8 -*-
+
 import os
 import random
 import json
 import yaml
 from tqdm import tqdm
-# from dotenv import load_dotenv
+from dotenv import load_dotenv
 from zhipuai import ZhipuAI


@@ -22,10 +24,12 @@ def zhipu_api(data, emo):
         text.append(jsoncon)
         return text

-    prompt = f'''你是一个研究过无数具有心理健康问题的病人与心理健康医生对话的专家,请你构造一些符合实际情况的具有心理健
-康问题的病人和心理健康医生的连续的多轮对话记录。要求病人的问题属于{data}场景,具有{emo}情感,医生的回复尽可能包含心理辅导知识,并且能够一步步诱导病人说出自己的问题进而提供解决问题的可行方案。注意,构造的数据必须以医生的陈述为结束语,每次只需要构造一个案例并且不需要写案例一、二等等,请只返回完整的对话内容。请以如下格式返回生成的数据:
-病人:病人的咨询或陈述
-医生:医生的安抚和建议
+    prompt = f'''你是一个研究过无数具有心理健康问题的病人与心理健康医生对话的专家,请你构造一些符合实际情况的具有心理健康问题的病人和心理健康医生的连续的多轮对话记录。
+要求病人的问题属于{data}场景,具有{emo}情感,医生的回复尽可能包含心理辅导知识,并且能够一步步诱导病人说出自己的问题进而提供解决问题的可行方案。
+注意,构造的数据必须以医生的陈述为结束语,每次只需要构造一个案例并且不需要写案例一、二等等,请返回完整的对话内容。
+请以如下格式返回生成的数据:
+病人:病人的咨询或陈述
+医生:医生的安抚和建议
 '''

     top_p = round(random.uniform(0.1, 0.9), 2)

@@ -42,7 +46,8 @@ def zhipu_api(data, emo):
 def convert(conversation):
     ret, one_conversation = {}, {}
     ret['conversation'] = []
-    one_conversation['system'] = '现在你是一个心理专家,我有一些心理问题,请你用专业的知识帮我解决。'
+
+    one_conversation['system'] = "现在你是一个心理专家, 我有一些心理问题, 请你用专业的知识帮我解决。"

     while '病人:' in conversation and '医生:' in conversation:
         one_conversation['input'] = conversation.split('病人:')[1].split('医生:')[0]

@@ -57,7 +62,7 @@ def convert(conversation):
 def save_jsonl(data_lis, file_path):
     if not os.path.exists(os.path.dirname(file_path)):
         os.makedirs(os.path.dirname(file_path))
-    with open(file_path, 'w', encoding='utf-8') as f:
+    with open(file_path, 'at', encoding='utf-8') as f:
         for item in data_lis:
             f.write(json.dumps(item, ensure_ascii=False) + '\n')

@@ -65,20 +70,23 @@ def save_jsonl(data_lis, file_path):
 if __name__ == '__main__':
     emotions_lis = configs['emotions_list']
     areas_of_life = configs['areas_of_life']
+    ai_tool = 'zhipuai'
+
     conversation_lis = []
     for emo in emotions_lis:
         for area in areas_of_life:
-            if os.path.exists(f'./zhipuai/{area}/{emo}.jsonl'):
-                print(f'./zhipuai/{area}/{emo}.jsonl exists')
-                continue
-            for i in tqdm(range(5), desc='{emo}, {area}'.format(emo=emo, area=area)):
+            gen_path = f'./{ai_tool}/{area}/{emo}.jsonl'
+
+            for i in tqdm(range(100), desc='{emo}, {area}'.format(emo=emo, area=area)):
                 res = zhipu_api(area, emo)
                 print(res)
                 if res == 'null':
                     print(area, emo, 'error')
                     continue
                 conversation_lis.append(convert(res))
-                save_jsonl(conversation_lis, f'./zhipuai/{area}/{emo}.jsonl')
-                print(f'generate ./zhipuai/{area}/{emo}.jsonl')
-                conversation_lis = []
+
+                if ((i+1) % 10 == 0):
+                    # path = f'./{args.data}.jsonl'
+                    save_jsonl(data_lis=conversation_lis, file_path=gen_path)
+                    print(f'generate {gen_path}')
+                    conversation_lis = []  # 清空
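The behavioural change in this file is that results are now appended to each scene file ('at' mode) and flushed in batches of 10 iterations, instead of overwriting the file on every iteration ('w' mode). A minimal, self-contained sketch of that accumulate-and-flush pattern, with placeholder data rather than the repo's actual records:

```python
import json
import os

def append_jsonl(items, path):
    # Append mode keeps earlier batches; 'w' would overwrite them on every flush.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'a', encoding='utf-8') as f:
        for item in items:
            f.write(json.dumps(item, ensure_ascii=False) + '\n')

buffer, path = [], './demo/output.jsonl'
for i in range(100):
    buffer.append({'id': i})       # placeholder record
    if (i + 1) % 10 == 0:          # flush every 10 items, then clear the buffer
        append_jsonl(buffer, path)
        buffer = []
```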