move scripts/upload_openxlab.py and scripts/trans_process.py
This commit is contained in:
commit
baba0d611d
@ -1,11 +1,11 @@
|
|||||||
aistudio _token : '{your_token}' # 文心一言的token
|
aistudio_token : '{your_token}' # 文心一言的token
|
||||||
dashscope_api_key : '{your_api_key}' #通义千问的api_key
|
dashscope_api_key : '{your_api_key}' # 通义千问的api_key
|
||||||
zhipuai_api_key : '{your_api_key}' # 智浦AI的密钥
|
zhipuai_api_key : '{your_api_key}' # 智谱AI的密钥
|
||||||
|
|
||||||
# 星火大模型配置
|
# 星火大模型配置
|
||||||
appid : "{}" # 填写控制台中获取的 APPID 信息
|
appid : "{}" # 填写控制台中获取的 APPID 信息
|
||||||
api_secret : "{}" # 填写控制台中获取的 APISecret 信息
|
api_secret : "{}" # 填写控制台中获取的 APISecret 信息
|
||||||
api_key : "{}" # 填写控制台中获取的 APIKey 信息
|
api_key : "{}" # 填写控制台中获取的 APIKey 信息
|
||||||
|
|
||||||
|
|
||||||
system : '现在你是一个心理专家,我有一些心理问题,请你用专业的知识帮我解决'
|
system : '现在你是一个心理专家,我有一些心理问题,请你用专业的知识帮我解决'
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
erniebot #文心一言
|
erniebot # 文心一言
|
||||||
dashscope # 通义千问
|
dashscope # 通义千问
|
||||||
zhipuai # 智浦
|
zhipuai # 智谱
|
||||||
python-dotenv # 智浦
|
python-dotenv # 智谱
|
||||||
websocket #调用星火大模型的时候会使用
|
websocket # 调用星火大模型的时候会使用
|
||||||
|
78
generate_data/trans_process.py
Normal file
78
generate_data/trans_process.py
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
import json
|
||||||
|
from tqdm import tqdm
|
||||||
|
|
||||||
|
|
||||||
|
def qwen_api(prompt):
    """Translate an English sentence into Chinese via the Qwen (DashScope) API.

    Returns the model's translation as a string, or the literal string
    'ERROR' when the HTTP call does not come back with status 200.
    """
    import dashscope
    from http import HTTPStatus

    # NOTE(review): placeholder credential — presumably replaced before use.
    dashscope.api_key = "your key"

    # Prepend the fixed translation instruction so the model returns only
    # the translated sentence.
    request = "你是一位非常擅长将英文翻译成中文的专家。请你将下面的英文翻译成正确地道的中文,要求只返回翻译的中文句子:\n" + prompt

    response = dashscope.Generation.call(
        model='qwen-max', prompt=request, history=[],
    )

    if response.status_code != HTTPStatus.OK:
        return 'ERROR'
    return response.output.text
|
||||||
|
|
||||||
|
|
||||||
|
def get_conversation_list():
    """Build a fine-tuning conversation list from ./ESConv.json.

    Every dialog turn is translated to Chinese with ``qwen_api`` and turns
    are paired into ``{"input": ..., "output": ...}`` records; the first
    pair of each dialog additionally carries the fixed ``"system"`` prompt.
    A trailing unpaired turn at the end of a dialog is silently dropped,
    matching the original pairing logic.

    Returns:
        list[dict]: one ``{"conversation": [...]}`` entry per dialog.
    """
    with open('./ESConv.json', 'rt', encoding='utf-8') as file:
        data = json.load(file)

    idx = 0
    conversation_list = []
    for itm in tqdm(data):
        one_conversation = {
            "conversation": []
        }
        dia_tuple = []
        for dia in tqdm(itm['dialog']):
            # Both roles are translated identically (the original duplicated
            # this call in two branches); any unknown role aborts the run.
            if dia['speaker'] in ('seeker', 'supporter'):
                dia_tuple.append(qwen_api(dia['content']))
            else:
                exit("不存在角色!")

            if len(dia_tuple) == 2:
                turn = {
                    "input": dia_tuple[0],
                    "output": dia_tuple[1],
                }
                # Only the very first pair of a dialog gets the system prompt;
                # key order (system, input, output) matches the original dict.
                if not one_conversation['conversation']:
                    turn = {
                        "system": "现在你是一个心理专家,我有一些心理问题,请你用专业的知识帮我解决。",
                        **turn,
                    }
                one_conversation['conversation'].append(turn)
                dia_tuple = []

        conversation_list.append(one_conversation)
        idx += 1

    print(idx)
    return conversation_list
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    conversation_list = get_conversation_list()
    # Persist conversation_list as a JSON file; ensure_ascii=False keeps the
    # Chinese text human-readable in the output.
    with open('conversation_list.json', 'wt', encoding='utf-8') as out_file:
        json.dump(conversation_list, out_file, ensure_ascii=False)
|
3
generate_data/upload_openxlab.py
Normal file
3
generate_data/upload_openxlab.py
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
import os
import subprocess

# Upload the model repo to OpenXLab. Using subprocess.run with an argument
# list instead of os.system avoids shell-string quoting pitfalls; the shell
# would have stripped the single quotes anyway, so the effective arguments
# are identical. check=False mirrors os.system, which ignored the exit code.
subprocess.run(
    [
        "openxlab", "model", "create",
        "--model-repo=jujimeizuo/EmoLLM_Model",
        "-s", "./metafile.yml",
    ],
    check=False,
)
|
@ -34,11 +34,21 @@ def zhipu_api(data, emo):
|
|||||||
|
|
||||||
top_p = round(random.uniform(0.1, 0.9), 2)
|
top_p = round(random.uniform(0.1, 0.9), 2)
|
||||||
messages = getText('user', prompt)
|
messages = getText('user', prompt)
|
||||||
response = client.chat.completions.create(
|
|
||||||
model='glm-4',
|
# Error code: 400, with error text {"error":{"code":"1301","message":
|
||||||
messages=messages,
|
# "系统检测到输入或生成内容可能包含不安全或敏感内容,请您避免输入易产生敏感内容的提示语,感谢您的配合。"}}
|
||||||
top_p=top_p,
|
try:
|
||||||
)
|
response = client.chat.completions.create(
|
||||||
|
model='glm-4',
|
||||||
|
messages=messages,
|
||||||
|
top_p=top_p,
|
||||||
|
)
|
||||||
|
except:
|
||||||
|
response = client.chat.completions.create(
|
||||||
|
model='glm-4',
|
||||||
|
messages=messages,
|
||||||
|
top_p=top_p,
|
||||||
|
)
|
||||||
|
|
||||||
return response.choices[0].message.content
|
return response.choices[0].message.content
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user