From 2bffdf60a33269dd6d2b283c55e9b46fc8a6327e Mon Sep 17 00:00:00 2001
From: chg0901
Date: Wed, 17 Jul 2024 10:26:09 +0800
Subject: [PATCH 1/3] webV3 opt

---
 web_internlm2_5.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/web_internlm2_5.py b/web_internlm2_5.py
index 0fc39cf..b31b33d 100644
--- a/web_internlm2_5.py
+++ b/web_internlm2_5.py
@@ -223,7 +223,7 @@ cur_query_prompt = '<|im_start|>user\n{user}<|im_end|>\n\
 
 def combine_history(prompt):
     messages = st.session_state.messages
-    meta_instruction = ('你是EmoLLM心理咨询师, 由EmoLLM团队打造, 是一个研究过无数具有心理咨询者与顶级专业心理咨询师对话的心理学教授, 在心理方面拥有广博的知识储备和丰富的研究咨询经验。你旨在通过专业心理咨询, 协助来访者完成心理诊断, 利用专业心理学知识与咨询技术一步步帮助来访者解决心理问题。')
+    meta_instruction = ('你是EmoLLM心理咨询师, 由EmoLLM团队打造, 是一个研究过无数具有心理咨询者与顶级专业心理咨询师对话的心理学教授, 在心理方面拥有广博的知识储备和丰富的研究咨询经验。你旨在通过专业心理咨询, 协助来访者完成心理诊断, 利用专业心理学知识与咨询技术一步步帮助来访者解决心理问题。如果有必要,请用“咨询者”称呼对话咨询的用户。')
     total_prompt = f'<|im_start|>system\n{meta_instruction}<|im_end|>\n'
     for message in messages:
         cur_content = message['content']
@@ -239,7 +239,7 @@ def combine_history(prompt):
 
 
 def main():
-    st.markdown("我在这里,准备好倾听你的心声了。", unsafe_allow_html=True)
+    # st.markdown("我在这里,准备好倾听你的心声了。", unsafe_allow_html=True)
     # torch.cuda.empty_cache()
     print('load model begin.')
     model, tokenizer = load_model()

From aed7c7aac394a54883af20f959684d8ff987cc86 Mon Sep 17 00:00:00 2001
From: chg0901
Date: Wed, 17 Jul 2024 10:35:57 +0800
Subject: [PATCH 2/3] full fine-tuning 2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README_EN.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README_EN.md b/README_EN.md
index 4ef247a..4932b33 100644
--- a/README_EN.md
+++ b/README_EN.md
@@ -48,7 +48,7 @@
 
 | Model | Type | File Links | Model Links |
 | :-------------------: | :------: | :------------------------------------------------------------------------------------------------------: |:------: |
-| InternLM2_5_7B_chat | 全量微调 | [internlm2_chat_7b_full.py](./xtuner_config/internlm2_chat_7b_full.py) | [OpenXLab](https://openxlab.org.cn/models/detail/chg0901/EmoLLM_V3.0), [ModelScope](https://modelscope.cn/models/chg0901/EmoLLMV3.0) |
+| InternLM2_5_7B_chat | full fine-tuning | [internlm2_chat_7b_full.py](./xtuner_config/internlm2_chat_7b_full.py) | [OpenXLab](https://openxlab.org.cn/models/detail/chg0901/EmoLLM_V3.0), [ModelScope](https://modelscope.cn/models/chg0901/EmoLLMV3.0) |
 | InternLM2_5_7B_chat | QLORA | [internlm2_5_chat_7b_qlora_oasst1_e3.py](./xtuner_config/internlm2_5_chat_7b_qlora_oasst1_e3.py) | [ModelScope](https://www.modelscope.cn/models/z342994309/emollm_interlm2_5/) |
 | InternLM2_7B_chat | QLORA | [internlm2_7b_chat_qlora_e3.py](./xtuner_config/internlm2_7b_chat_qlora_e3.py) | [ModelScope](https://modelscope.cn/models/aJupyter/EmoLLM/files) |
 | InternLM2_7B_chat | full fine-tuning | [internlm2_chat_7b_full.py](./xtuner_config/internlm2_chat_7b_full.py) | [OpenXLab](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_internlm2_7b_full) |

From 65f6b4a2226a0cd8d27e625dee8b3ab0a3bd2eec Mon Sep 17 00:00:00 2001
From: chg0901
Date: Wed, 17 Jul 2024 14:25:44 +0800
Subject: [PATCH 3/3] ./xtuner_config/internlm2_5_chat_7b_full.py link update

---
 README.md    | 2 +-
 README_EN.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 92ccc73..a852e6b 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@
 
 | 模型 | 类型 | 链接 | 模型链接 |
 | :-------------------: | :------: | :------------------------------------------------------------------------------------------------------: |:------: |
-| InternLM2_5_7B_chat | 全量微调 | [internlm2_chat_7b_full.py](./xtuner_config/internlm2_chat_7b_full.py) | [OpenXLab](https://openxlab.org.cn/models/detail/chg0901/EmoLLM_V3.0), [ModelScope](https://modelscope.cn/models/chg0901/EmoLLMV3.0) |
+| InternLM2_5_7B_chat | 全量微调 | [internlm2_5_chat_7b_full.py](./xtuner_config/internlm2_5_chat_7b_full.py) | [OpenXLab](https://openxlab.org.cn/models/detail/chg0901/EmoLLM_V3.0), [ModelScope](https://modelscope.cn/models/chg0901/EmoLLMV3.0) |
 | InternLM2_5_7B_chat | QLORA | [internlm2_5_chat_7b_qlora_oasst1_e3.py](./xtuner_config/internlm2_5_chat_7b_qlora_oasst1_e3.py) | [ModelScope](https://www.modelscope.cn/models/z342994309/emollm_interlm2_5/) |
 | InternLM2_7B_chat | QLORA | [internlm2_7b_chat_qlora_e3.py](./xtuner_config/internlm2_7b_chat_qlora_e3.py) | [ModelScope](https://modelscope.cn/models/aJupyter/EmoLLM/files) |
 | InternLM2_7B_chat | 全量微调 | [internlm2_chat_7b_full.py](./xtuner_config/internlm2_chat_7b_full.py) | [OpenXLab](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_internlm2_7b_full) |

diff --git a/README_EN.md b/README_EN.md
index 4932b33..c70fd2f 100644
--- a/README_EN.md
+++ b/README_EN.md
@@ -48,7 +48,7 @@
 
 | Model | Type | File Links | Model Links |
 | :-------------------: | :------: | :------------------------------------------------------------------------------------------------------: |:------: |
-| InternLM2_5_7B_chat | full fine-tuning | [internlm2_chat_7b_full.py](./xtuner_config/internlm2_chat_7b_full.py) | [OpenXLab](https://openxlab.org.cn/models/detail/chg0901/EmoLLM_V3.0), [ModelScope](https://modelscope.cn/models/chg0901/EmoLLMV3.0) |
+| InternLM2_5_7B_chat | full fine-tuning | [internlm2_5_chat_7b_full.py](./xtuner_config/internlm2_5_chat_7b_full.py) | [OpenXLab](https://openxlab.org.cn/models/detail/chg0901/EmoLLM_V3.0), [ModelScope](https://modelscope.cn/models/chg0901/EmoLLMV3.0) |
 | InternLM2_5_7B_chat | QLORA | [internlm2_5_chat_7b_qlora_oasst1_e3.py](./xtuner_config/internlm2_5_chat_7b_qlora_oasst1_e3.py) | [ModelScope](https://www.modelscope.cn/models/z342994309/emollm_interlm2_5/) |
 | InternLM2_7B_chat | QLORA | [internlm2_7b_chat_qlora_e3.py](./xtuner_config/internlm2_7b_chat_qlora_e3.py) | [ModelScope](https://modelscope.cn/models/aJupyter/EmoLLM/files) |
 | InternLM2_7B_chat | full fine-tuning | [internlm2_chat_7b_full.py](./xtuner_config/internlm2_chat_7b_full.py) | [OpenXLab](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_internlm2_7b_full) |
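
For context, a minimal sketch of the prompt assembly that PATCH 1/3 touches: combine_history() in web_internlm2_5.py prepends the edited meta_instruction as a ChatML system turn, replays the stored Streamlit chat history, and appends the new user query with an open assistant turn. The message-dict shape ({'role': ..., 'content': ...}) and the trailing assistant marker below are assumptions inferred from the hunk context, not the repository's exact code.

# Sketch only: self-contained version of the ChatML assembly seen in PATCH 1/3.
# The history format ({'role', 'content'} dicts) is an assumed simplification.
META_INSTRUCTION = '你是EmoLLM心理咨询师, 由EmoLLM团队打造, ...'  # system prompt edited in PATCH 1/3


def combine_history(messages, prompt):
    # Start with the system turn carrying the counselor persona.
    total_prompt = f'<|im_start|>system\n{META_INSTRUCTION}<|im_end|>\n'
    # Replay every stored turn in ChatML form.
    for message in messages:
        role = 'user' if message['role'] == 'user' else 'assistant'
        total_prompt += f'<|im_start|>{role}\n{message["content"]}<|im_end|>\n'
    # Append the new query and leave the assistant turn open for generation.
    total_prompt += f'<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n'
    return total_prompt


if __name__ == '__main__':
    history = [{'role': 'user', 'content': '你好'},
               {'role': 'assistant', 'content': '你好, 咨询者。'}]
    print(combine_history(history, '最近压力很大, 睡不好。'))

Run directly, this prints the full prompt string: the system block, both history turns, the new user turn, and an open assistant marker for the model to complete.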