Merge pull request #80 from chg0901/main
update qwen, zhipuai gen_data and readme
Commit 71c46bcdbf
config.yml
@@ -1,11 +1,11 @@
-aistudio _token : '{your_token}' # 文心一言的token
-dashscope_api_key : '{your_api_key}' #通义千问的api_key
-zhiouai_api_key : '{your_api_key}' # 智浦AI的密钥
+aistudio_token : '{your_token}' # 文心一言的token
+dashscope_api_key : '{your_api_key}' # 通义千问的api_key
+zhiouai_api_key : '{your_api_key}' # 智谱AI的密钥

 # 星火大模型配置
 appid : "{}" # 填写控制台中获取的 APPID 信息
 api_secret : "{}" # 填写控制台中获取的 APISecret 信息
 api_key : "{}" # 填写控制台中获取的 APIKey 信息

 system : '现在你是一个心理专家,我有一些心理问题,请你用专业的知识帮我解决'
@@ -56,4 +56,4 @@ areas_of_life : [
     "安全",
     "梦想",
     "自由"
 ]
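For orientation, the generation scripts in this PR read these keys with PyYAML. A minimal pre-flight sketch, not part of the commit (key names are copied verbatim from the file above, including the still-misspelled `zhiouai_api_key`; everything else is illustrative):

```python
import yaml

# Load config.yml and fail fast if a credential is missing or still a placeholder.
with open('config.yml', 'r', encoding='utf-8') as f:
    configs = yaml.safe_load(f)

for key in ('aistudio_token', 'dashscope_api_key', 'zhiouai_api_key',
            'appid', 'api_secret', 'api_key'):
    if not configs.get(key) or configs[key] in ('{your_token}', '{your_api_key}', '{}'):
        raise ValueError(f'{key} is not set in config.yml')
```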
generate_data/qwen_gen_data_NoBash.py (new file, 105 lines)
@@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-
import json
import random
import argparse
import yaml
import re
import os

from tqdm import tqdm
import dashscope
from http import HTTPStatus

# Load API keys plus the emotion / life-area lists from config.yml.
with open('config.yml', 'r', encoding='utf-8') as f:
    configs = yaml.load(f.read(), Loader=yaml.FullLoader)


def qwen_api(data, emo):
    # Ask qwen-max for one multi-turn patient/doctor dialogue in the given
    # life-area scenario (data) with the given emotion (emo).
    dashscope.api_key = configs['dashscope_api_key']
    prompt = f'''你是一个研究过无数具有心理健康问题的病人与心理健康医生对话的专家,请你构造一些符合实际情况的具有心理健康问题的病人和心理健康医生的连续的多轮对话记录。
要求病人的问题属于{data}场景,具有{emo}情感,医生的回复尽可能包含心理辅导知识,并且能够一步步诱导病人说出自己的问题进而提供解决问题的可行方案。
注意,构造的数据必须以医生的陈述为结束语,每次只需要构造一个案例并且不需要写案例一、二等等,请返回完整的对话内容。
请以如下格式返回生成的数据:
病人:病人的咨询或陈述
医生:医生的安抚和建议
'''
    response = dashscope.Generation.call(
        model='qwen-max',
        prompt=prompt,
        history=[],
    )

    if response.status_code == HTTPStatus.OK:
        result = response.output.text
        print(result)
    else:
        result = 'ERROR'
    return result


def save_jsonl(data_lis, file_path):
    # Append one JSON object per line; mode 'at' keeps earlier batches intact.
    if not os.path.exists(os.path.dirname(file_path)):
        os.makedirs(os.path.dirname(file_path))
    with open(file_path, 'at', encoding='utf-8') as f:
        for item in data_lis:
            f.write(json.dumps(item, ensure_ascii=False) + '\n')


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='数据生成参数')
    parser.add_argument('--data', type=str, help='生活场景')
    args = parser.parse_args()  # --data is currently unused; scenarios come from config.yml

    emotions_lis = configs['emotions_list']
    areas_of_life = configs['areas_of_life']
    ai_tool = 'qwen'

    conversation_lis = []

    for emo in emotions_lis:
        for area in areas_of_life:
            gen_path = f'./{ai_tool}/{area}/{emo}.jsonl'

            for i in tqdm(range(100), desc=f'{emo}, {area}'):
                one_conversation = {
                    "conversation": []
                }

                res = qwen_api(data=area, emo=emo)
                print(area, emo)

                # One session: pull the doctor and patient turns out of the
                # generated transcript. `(病人:|$)` lets the final doctor
                # statement close the dialogue, as the prompt requires.
                doctor_pattern = r'医生:(.*?)(病人:|$)'
                doctor_matches = re.findall(doctor_pattern, res, re.DOTALL)
                doctor_conversations = [match[0] for match in doctor_matches]

                patient_pattern = r'病人:(.*?)医生:'
                patient_matches = re.findall(patient_pattern, res, re.DOTALL)
                patient_conversations = list(patient_matches)

                for doc, pat in zip(doctor_conversations, patient_conversations):
                    if len(one_conversation['conversation']) == 0:
                        # The first turn carries the system prompt.
                        one_conversation['conversation'].append(
                            {
                                "system": "现在你是一个心理专家, 我有一些心理问题, 请你用专业的知识帮我解决。",
                                "input": pat,
                                "output": doc
                            },
                        )
                    else:
                        one_conversation['conversation'].append(
                            {
                                "input": pat,
                                "output": doc
                            },
                        )
                conversation_lis.append(one_conversation)

                # Flush to disk every 10 generated conversations.
                if (i + 1) % 10 == 0:
                    save_jsonl(data_lis=conversation_lis, file_path=gen_path)
                    print(f'generate {gen_path}')
                    conversation_lis = []  # reset the buffer
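A quick way to sanity-check the two extraction patterns above is to run them on a toy transcript. The sample text below is invented, but the regexes are the ones from the script; note how `(病人:|$)` lets the final doctor turn end at the end of the string:

```python
import re

# Invented two-turn transcript in the format the prompt requests.
res = '病人:最近总是失眠。医生:能说说最近发生了什么吗?病人:工作压力很大。医生:我们可以试试放松训练。'

doctors = [m[0] for m in re.findall(r'医生:(.*?)(病人:|$)', res, re.DOTALL)]
patients = re.findall(r'病人:(.*?)医生:', res, re.DOTALL)

# [('最近总是失眠。', '能说说最近发生了什么吗?'), ('工作压力很大。', '我们可以试试放松训练。')]
print(list(zip(patients, doctors)))
```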
requirements.txt
@@ -1,4 +1,5 @@
-erniebot #文心一言
-dashscope # 通义千问
-zhipuai # 智浦
-websocket #调用星火大模型的时候会使用
+erniebot # 文心一言
+dashscope # 通义千问
+zhipuai # 智谱
+python-dotenv # 智谱
+websocket # 调用星火大模型的时候会使用
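`python-dotenv` is added here because `zhipuai_gen_data.py` (diffed further below) now enables `from dotenv import load_dotenv`. The usual pattern looks like the following sketch, where the environment variable name `ZHIPUAI_API_KEY` is an assumption rather than something shown in this diff:

```python
import os
from dotenv import load_dotenv

# Read secrets from a local .env file instead of hard-coding them.
load_dotenv()
api_key = os.getenv('ZHIPUAI_API_KEY')  # variable name is an assumption
```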
@@ -1,26 +1,26 @@
 # EmoLLM 微调数据生成教程

-**一、目标与背景**
+## **一、目标与背景**

 为了使我们的心理大模型有更好的表达效果,我们必须要有高质量的数据集。为了达到这一目标,我们决定利用四种强大的中文大模型:文心一言、通义千问、讯飞星火 和 智谱GLM 来生成对话数据。此外,我们还将增强数据集的认知深度,通过加入少量自我认知数据集来提高模型的泛化能力。

-**二、数据集生成方法**
+## **二、数据集生成方法**

 1. **模型选择与数据准备**

    选择文心一言、通义千问、讯飞星火和智谱GLM这四种大语言模型,获取调用相应接口的API,并准备用于生成对话数据。

-3. **单轮与多轮对话数据生成**
+2. **单轮与多轮对话数据生成**

    利用这四种模型,我们生成了10000条单轮和多轮对话数据。在这一过程中,我们确保了数据的多样性、复杂性和有效性。

    因为心理活动往往是复杂的,为了保证数据的多样性。我们选择了16 * 28 共 `448`个场景进行数据集生成,具体场景名称请参考config.yml中的 `emotions_list 和 areas_of_life`两个参数的配置。

-5. **自我认知数据集的加入**
+3. **自我认知数据集的加入**

    为了增强模型的认知能力,我们特意加入了一部分自我认知数据集。这些数据集有助于模型更好地理解上下文,提高对话的自然度和连贯性。

-**三、实践步骤**
+## **三、实践步骤**

 1. **初始化**

@@ -29,6 +29,7 @@
   ```bash
   pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
   ```

 * 准备输入数据和配置参数

   可参见 `config.yml`均有注释

@@ -43,27 +44,37 @@
 3. **数据生成**

   * 使用通义千问大模型进行数据生成

     ```bash
     # 终端运行
     bash run_qwen.bash
+
+    # 或者不使用终端运行
+    python qwen_gen_data_NoBash.py
     ```

   * 使用百度文心大模型进行数据生成

     ```bash
     # 终端运行
     python ernie_gen_data.py
     ```

   * 使用智谱GLM大模型进行数据生成

     ```bash
     # 终端运行
     python zhipuai_gen_data.py
     ```

   * 使用讯飞星火大模型进行数据生成

     ```bash
     # 终端运行
     python ./xinghuo/gen_data.py
     ```

-4. **自我认知数据集的整合**
+1. **自我认知数据集的整合**

   * 自我认知数据集需要按照格式手动生成,如下格式即可

   ```json

@@ -90,8 +101,8 @@
 5. **数据集整合**

 在进行数据集整合之前,我们要检查生成的数据是否存在格式错误,类型不符合等情况。我们需要check.py进行检查数据。最后再使用merge_json.py将所有的json整合为一个总的json文件。
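Neither `check.py` nor `merge_json.py` appears in this diff, so as a rough sketch only: merging the per-scenario JSONL shards into one combined JSON file could look like the following (the glob pattern and output name are assumptions, not repo facts):

```python
import glob
import json

# Collect every generated shard, e.g. ./qwen/<area>/<emo>.jsonl, and merge.
merged = []
for path in glob.glob('./*/**/*.jsonl', recursive=True):
    with open(path, encoding='utf-8') as f:
        merged.extend(json.loads(line) for line in f if line.strip())

with open('merged.json', 'w', encoding='utf-8') as f:
    json.dump(merged, f, ensure_ascii=False, indent=2)
```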
-7. **评估与优化**
+6. **评估与优化**

   * 使用适当的评估指标对生成的数据集进行评估
   * 根据评估结果进行必要的优化和调整
@@ -1,10 +1,10 @@
 # EmoLLM fine-tuning data generation tutorial

-**I. Objectives and Background**
+## **I. Objectives and Background**

 In order to have a better representation of our large mental models, we must have high quality datasets. To achieve this goal, we decided to use four powerful AI grand models: **Wenxin Yiyan**, **Tongyi Qianwen**, **Feifei Spark**, and **Zhipu GLM** to generate conversation data. In addition, we will enhance the cognitive depth of the dataset and improve the generalization ability of the model by adding a small number of self-cognitive datasets.

-**II. dataset generation method**
+## **II. dataset generation method**

 1. **Model selection and data preparation**

@@ -15,12 +15,12 @@ In order to have a better representation of our large mental models, we must hav
 Using these four models, we generated 10,000 single and multi-turn conversation data. In doing so, we ensure the diversity, complexity and validity of our data.

 Because mental activity is often complex, in order to ensure the diversity of data. We selected a total of 16 * 28 `448` scenarios for dataset generation. For specific scenario names, please refer to the configuration of the two parameters `emotions_list and areas_of_life` in config.yml.

 4. **Inclusion of self-perception datasets**

 In order to enhance the cognitive ability of the model, we specially added a part of self-cognitive dataset. These datasets help the model better understand the context and improve the naturalness and coherence of the conversation.

-**III. Practical steps**
+## **III. Practical steps**

 1. **Initialize**

@@ -45,21 +45,31 @@ In order to have a better representation of our large mental models, we must hav
 3. **Data generation**

   * Data generation using Tongyi Qianwen

     ```bash
     # Terminal operation
     bash run_qwen.bash
+
+    # Or just use python without bash
+    python qwen_gen_data_NoBash.py
     ```

   * Data generation using Wenxin Yiyan

     ```bash
     # Terminal operation
     python ernie_gen_data.py
     ```

   * Data generation using Zhipu GLM

     ```bash
     # Terminal operation
     python zhipuai_gen_data.py
     ```

   * Data generation using IFlystar Fire

     ```bash
     # Terminal operation
     python ./xinghuo/gen_data.py
@@ -68,6 +78,7 @@ In order to have a better representation of our large mental models, we must hav
 4. **Integration of self-cognition datasets**

   * Self-cognition dataset this needs to be manually generated in accordance with the format, the following format can be

   ```json
   [
     {
@@ -102,5 +113,4 @@ Before dataset integration, we need to check whether the generated data has form
 * Evaluate the trained model using an independent test set
 * Make necessary adjustments and optimizations based on test results
-* Deploy the final model into a real application
-*
+* Deploy the final model into a real application
zhipuai_gen_data.py
@@ -1,9 +1,11 @@
 # -*- coding: utf-8 -*-

 import os
 import random
 import json
 import yaml
 from tqdm import tqdm
-# from dotenv import load_dotenv
+from dotenv import load_dotenv
 from zhipuai import ZhipuAI


@@ -22,10 +24,12 @@ def zhipu_api(data, emo):
        text.append(jsoncon)
        return text

-    prompt = f'''你是一个研究过无数具有心理健康问题的病人与心理健康医生对话的专家,请你构造一些符合实际情况的具有心理健
-康问题的病人和心理健康医生的连续的多轮对话记录。要求病人的问题属于{data}场景,具有{emo}情感,医生的回复尽可能包含心理辅导知识,并且能够一步步诱导病人说出自己的问题进而提供解决问题的可行方案。注意,构造的数据必须以医生的陈述为结束语,每次只需要构造一个案例并且不需要写案例一、二等等,请只返回完整的对话内容。请以如下格式返回生成的数据:
-病人:病人的咨询或陈述
-医生:医生的安抚和建议
+    prompt = f'''你是一个研究过无数具有心理健康问题的病人与心理健康医生对话的专家,请你构造一些符合实际情况的具有心理健康问题的病人和心理健康医生的连续的多轮对话记录。
+要求病人的问题属于{data}场景,具有{emo}情感,医生的回复尽可能包含心理辅导知识,并且能够一步步诱导病人说出自己的问题进而提供解决问题的可行方案。
+注意,构造的数据必须以医生的陈述为结束语,每次只需要构造一个案例并且不需要写案例一、二等等,请返回完整的对话内容。
+请以如下格式返回生成的数据:
+病人:病人的咨询或陈述
+医生:医生的安抚和建议
 '''

     top_p = round(random.uniform(0.1, 0.9), 2)
@@ -42,7 +46,8 @@ def zhipu_api(data, emo):
 def convert(conversation):
     ret, one_conversation = {}, {}
     ret['conversation'] = []
-    one_conversation['system'] = '现在你是一个心理专家,我有一些心理问题,请你用专业的知识帮我解决。'
+    one_conversation['system'] = "现在你是一个心理专家, 我有一些心理问题, 请你用专业的知识帮我解决。"
+
     while '病人:' in conversation and '医生:' in conversation:
         one_conversation['input'] = conversation.split('病人:')[1].split('医生:')[0]
@@ -57,7 +62,7 @@ def convert(conversation):
 def save_jsonl(data_lis, file_path):
     if not os.path.exists(os.path.dirname(file_path)):
         os.makedirs(os.path.dirname(file_path))
-    with open(file_path, 'w', encoding='utf-8') as f:
+    with open(file_path, 'at', encoding='utf-8') as f:
         for item in data_lis:
             f.write(json.dumps(item, ensure_ascii=False) + '\n')
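One consequence of switching `save_jsonl` from `'w'` to append mode (`'at'`): re-running the script for a scenario that already has a shard now appends duplicate records instead of overwriting them. A hedged post-hoc cleanup sketch, not part of the commit (the path is an example; real names come from config.yml):

```python
shard = './zhipuai/安全/恐惧.jsonl'  # example path

# Drop exact duplicate lines while preserving order.
with open(shard, encoding='utf-8') as f:
    lines = f.readlines()

seen = set()
unique = [line for line in lines if not (line in seen or seen.add(line))]

with open(shard, 'w', encoding='utf-8') as f:
    f.writelines(unique)
```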
@@ -65,20 +70,23 @@ def save_jsonl(data_lis, file_path):
 if __name__ == '__main__':
     emotions_lis = configs['emotions_list']
     areas_of_life = configs['areas_of_life']
     ai_tool = 'zhipuai'

     conversation_lis = []
     for emo in emotions_lis:
         for area in areas_of_life:
-            if os.path.exists(f'./zhipuai/{area}/{emo}.jsonl'):
-                print(f'./zhipuai/{area}/{emo}.jsonl exists')
-                continue
-            for i in tqdm(range(5), desc='{emo}, {area}'.format(emo=emo, area=area)):
+            gen_path = f'./{ai_tool}/{area}/{emo}.jsonl'
+
+            for i in tqdm(range(100), desc='{emo}, {area}'.format(emo=emo, area=area)):
                 res = zhipu_api(area, emo)
                 print(res)
                 if res == 'null':
                     print(area, emo, 'error')
                     continue
                 conversation_lis.append(convert(res))
-                save_jsonl(conversation_lis, f'./zhipuai/{area}/{emo}.jsonl')
-                print(f'generate ./zhipuai/{area}/{emo}.jsonl')
-                conversation_lis = []
+
+                if ((i+1) % 10 == 0):
+                    # path = f'./{args.data}.jsonl'
+                    save_jsonl(data_lis=conversation_lis, file_path=gen_path)
+                    print(f'generate {gen_path}')
+                    conversation_lis = [] # 清空
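With both generators writing one shard per (area, emotion) pair, a quick way to see progress is to count records per shard. A small sketch following the `gen_path` layouts above (`./qwen/<area>/<emo>.jsonl` and `./zhipuai/<area>/<emo>.jsonl`):

```python
import glob

# Count generated conversations per shard under ./qwen and ./zhipuai.
for path in sorted(glob.glob('./qwen/*/*.jsonl') + glob.glob('./zhipuai/*/*.jsonl')):
    with open(path, encoding='utf-8') as f:
        print(path, sum(1 for line in f if line.strip()))
```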