From 10e6973a82e4d34c9fe6597b4662df0125683a08 Mon Sep 17 00:00:00 2001 From: HongCheng Date: Sat, 4 May 2024 13:25:51 +0900 Subject: [PATCH] Update llama3 web demo and tutorials --- .vscode/settings.json | 5 + README.md | 58 +-- README_EN.md | 59 +-- app.py | 346 +---------------- app_bk.py | 356 ------------------ app_web_demo-Llama3.py | 10 +- assets/new_openxlab_app_demo.png | Bin 0 -> 46372 bytes ...ME_llama3_8b_instruct_qlora_alpaca_e3_M.md | 26 +- 8 files changed, 108 insertions(+), 752 deletions(-) create mode 100644 .vscode/settings.json delete mode 100644 app_bk.py create mode 100644 assets/new_openxlab_app_demo.png diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..1f6e803 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "[python]": { + "editor.defaultFormatter": null + } +} \ No newline at end of file diff --git a/README.md b/README.md index c6625b5..76128f4 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,7 @@ | DeepSeek MoE_16B_chat | QLORA | [deepseek_moe_16b_chat_qlora_oasst1_e3.py](./xtuner_config/deepseek_moe_16b_chat_qlora_oasst1_e3.py) | | | Mixtral 8x7B_instruct | QLORA | [mixtral_8x7b_instruct_qlora_oasst1_e3.py](./xtuner_config/mixtral_8x7b_instruct_qlora_oasst1_e3.py) | | | LLaMA3_8b_instruct | QLORA | [aiwei_llama3_8b_instruct_qlora_e3.py](./xtuner_config/aiwei_llama3_8b_instruct_qlora_e3.py) | | -| LLaMA3_8b_instruct | QLORA | [llama3_8b_instruct_qlora_alpaca_e3_M.py](./xtuner_config/llama3_8b_instruct_qlora_alpaca_e3_M.py) |[OpenXLab](https://openxlab.org.cn/models/detail/chg0901/EmoLLM-Llama3-8B-Instruct2.0), [ModelScope](https://modelscope.cn/models/chg0901/EmoLLM-Llama3-8B-Instruct2.0/summary) | +| LLaMA3_8b_instruct | QLORA | [llama3_8b_instruct_qlora_alpaca_e3_M_ruozhi_scM.py](./xtuner_config/llama3_8b_instruct_qlora_alpaca_e3_M_ruozhi_scM.py) |[OpenXLab](https://openxlab.org.cn/models/detail/chg0901/EmoLLM-Llama3-8B-Instruct3.0), 
[ModelScope](https://modelscope.cn/models/chg0901/EmoLLM-Llama3-8B-Instruct3.0/summary) | | …… | …… | …… | …… | @@ -97,44 +97,46 @@ -### 🎇最近更新 -- 【2024.4.20】[LLAMA3微调指南](xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md)及基于[LLaMA3_8b_instruct的艾薇](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM-LLaMA3_8b_instruct_aiwei)开源 -- 【2023.4.14】新增[快速开始](docs/quick_start.md)和保姆级教程[BabyEmoLLM](Baby_EmoLLM.ipynb) -- 【2024.4.2】在 Huggingface 上传[老母亲心理咨询师](https://huggingface.co/brycewang2018/EmoLLM-mother/tree/main) -- 【2024.3.25】在百度飞桨平台发布[爹系男友心理咨询师](https://aistudio.baidu.com/community/app/68787) -- 【2024.3.24】在**OpenXLab**和**ModelScope**平台发布**InternLM2-Base-7B QLoRA微调模型**, 具体请查看[**InternLM2-Base-7B QLoRA**](./xtuner_config/README_internlm2_7b_base_qlora.md) -- 【2024.3.12】在百度飞桨平台发布[艾薇](https://aistudio.baidu.com/community/app/63335) -- 【2024.3.11】 **EmoLLM V2.0 相比 EmoLLM V1.0 全面提升,已超越 Role-playing ChatGPT 在心理咨询任务上的能力!**[点击体验EmoLLM V2.0](https://openxlab.org.cn/apps/detail/Farewell1/EmoLLMV2.0),更新[数据集统计及详细信息](./datasets/)、[路线图](./assets/Roadmap_ZH.png) -- 【2024.3.9】 新增并发功能加速 [QA 对生成](./scripts/qa_generation/)、[RAG pipeline](./rag/) -- 【2024.3.3】 [基于InternLM2-7B-chat全量微调版本EmoLLM V2.0开源](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_internlm2_7b_full),需要两块A100*80G,更新专业评估,详见[evaluate](./evaluate/),更新基于PaddleOCR的PDF转txt工具脚本,详见[scripts](./scripts/) -- 【2024.2.29】更新客观评估计算,详见[evaluate](./evaluate/),更新一系列数据集,详见[datasets](./datasets/) -- 【2024.2.27】更新英文readme和一系列数据集(舔狗和单轮对话) -- 【2024.2.23】推出基于InternLM2_7B_chat_qlora的 `温柔御姐心理医生艾薇`,[点击获取模型权重](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_aiwei),[配置文件](xtuner_config/aiwei-internlm2_chat_7b_qlora.py),[在线体验链接](https://openxlab.org.cn/apps/detail/ajupyter/EmoLLM-aiwei) -- 【2024.2.23】更新[若干微调配置](/xtuner_config/),新增 [data_pro.json](/datasets/data_pro.json)(数量更多、场景更全、更丰富)和 [aiwei.json](/datasets/aiwei.json)(温柔御姐角色扮演专用,带有Emoji表情),即将推出 `温柔御姐心理医生艾薇` -- 【2024.2.18】 
[基于Qwen1_5-0_5B-Chat全量微调版本开源](https://www.modelscope.cn/models/aJupyter/EmoLLM_Qwen1_5-0_5B-Chat_full_sft/summary),算力有限的道友可以玩起来~ +## 🎇最近更新 + +- 【2024.05.04】基于LLaMA3_8b_instruct的[EmoLLM3.0 OpenXLab Demo](https://st-app-center-006861-9746-jlroxvg.openxlab.space/)上线([重启链接](https://openxlab.org.cn/apps/detail/chg0901/EmoLLM-Llama3-8B-Instruct3.0)), [**LLAMA3微调指南**](xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md)**更新**,在[**OpenXLab**](https://openxlab.org.cn/models/detail/chg0901/EmoLLM-Llama3-8B-Instruct3.0)和[**ModelScope**](https://modelscope.cn/models/chg0901/EmoLLM-Llama3-8B-Instruct3.0/summary)平台发布**LLaMA3_8b_instruct-8B QLoRA微调模型 EmoLLM3.0权重** +- 【2024.04.20】[LLAMA3微调指南](xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md)及基于[LLaMA3_8b_instruct的艾薇](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM-LLaMA3_8b_instruct_aiwei)开源 +- 【2023.04.14】新增[快速开始](docs/quick_start.md)和保姆级教程[BabyEmoLLM](Baby_EmoLLM.ipynb) +- 【2024.04.02】在 Huggingface 上传[老母亲心理咨询师](https://huggingface.co/brycewang2018/EmoLLM-mother/tree/main) +- 【2024.03.25】在百度飞桨平台发布[爹系男友心理咨询师](https://aistudio.baidu.com/community/app/68787) +- 【2024.03.24】在**OpenXLab**和**ModelScope**平台发布**InternLM2-Base-7B QLoRA微调模型**, 具体请查看[**InternLM2-Base-7B QLoRA**](./xtuner_config/README_internlm2_7b_base_qlora.md) +- 【2024.03.12】在百度飞桨平台发布[艾薇](https://aistudio.baidu.com/community/app/63335) +- 【2024.03.11】 **EmoLLM V2.0 相比 EmoLLM V1.0 全面提升,已超越 Role-playing ChatGPT 在心理咨询任务上的能力!**[点击体验EmoLLM V2.0](https://openxlab.org.cn/apps/detail/Farewell1/EmoLLMV2.0),更新[数据集统计及详细信息](./datasets/)、[路线图](./assets/Roadmap_ZH.png) +- 【2024.03.09】 新增并发功能加速 [QA 对生成](./scripts/qa_generation/)、[RAG pipeline](./rag/) +- 【2024.03.03】 [基于InternLM2-7B-chat全量微调版本EmoLLM V2.0开源](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_internlm2_7b_full),需要两块A100*80G,更新专业评估,详见[evaluate](./evaluate/),更新基于PaddleOCR的PDF转txt工具脚本,详见[scripts](./scripts/) +- 【2024.02.29】更新客观评估计算,详见[evaluate](./evaluate/),更新一系列数据集,详见[datasets](./datasets/) +- 
【2024.02.27】更新英文readme和一系列数据集(舔狗和单轮对话) +- 【2024.02.23】推出基于InternLM2_7B_chat_qlora的 `温柔御姐心理医生艾薇`,[点击获取模型权重](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_aiwei),[配置文件](xtuner_config/aiwei-internlm2_chat_7b_qlora.py),[在线体验链接](https://openxlab.org.cn/apps/detail/ajupyter/EmoLLM-aiwei) +- 【2024.02.23】更新[若干微调配置](/xtuner_config/),新增 [data_pro.json](/datasets/data_pro.json)(数量更多、场景更全、更丰富)和 [aiwei.json](/datasets/aiwei.json)(温柔御姐角色扮演专用,带有Emoji表情),即将推出 `温柔御姐心理医生艾薇` +- 【2024.02.18】 [基于Qwen1_5-0_5B-Chat全量微调版本开源](https://www.modelscope.cn/models/aJupyter/EmoLLM_Qwen1_5-0_5B-Chat_full_sft/summary),算力有限的道友可以玩起来~
查看更多 -- 【2024.2.6】 EmoLLM在[**Openxlab** ](https://openxlab.org.cn/models/detail/jujimeizuo/EmoLLM_Model) 平台下载量高达18.7k,欢迎大家体验! +- 【2024.02.06】 EmoLLM在[**Openxlab** ](https://openxlab.org.cn/models/detail/jujimeizuo/EmoLLM_Model) 平台下载量高达18.7k,欢迎大家体验!

模型下载量

-- 【2024.2.5】 项目荣获公众号**NLP工程化**推文宣传[推文链接](https://mp.weixin.qq.com/s/78lrRl2tlXEKUfElnkVx4A),为博主推广一波,欢迎大家关注!!🥳🥳 +- 【2024.02.05】 项目荣获公众号**NLP工程化**推文宣传[推文链接](https://mp.weixin.qq.com/s/78lrRl2tlXEKUfElnkVx4A),为博主推广一波,欢迎大家关注!!🥳🥳

公众号二维码

-- 【2024.2.3】 [项目宣传视频](https://www.bilibili.com/video/BV1N7421N76X/)完成 😊 -- 【2024.1.27】 完善数据构建文档、微调指南、部署指南、Readme等相关文档 👏 -- 【2024.1.25】 EmoLLM V1.0 已部署上线 https://openxlab.org.cn/apps/detail/jujimeizuo/EmoLLM 😀 +- 【2024.02.03】 [项目宣传视频](https://www.bilibili.com/video/BV1N7421N76X/)完成 😊 +- 【2024.01.27】 完善数据构建文档、微调指南、部署指南、Readme等相关文档 👏 +- 【2024.01.25】 EmoLLM V1.0 已部署上线 https://openxlab.org.cn/apps/detail/jujimeizuo/EmoLLM 😀
-### 🏆荣誉栏 +## 🏆荣誉栏 - 项目荣获上海人工智能实验室举办的**2024浦源大模型系列挑战赛春季赛*****创新创意奖*** @@ -145,14 +147,14 @@ - 项目荣获公众号**NLP工程化**[推文宣传](https://mp.weixin.qq.com/s/78lrRl2tlXEKUfElnkVx4A) -### 🎯路线图 +## 🎯路线图

Roadmap_ZH -### 🔗框架图 +## 🔗框架图

@@ -162,10 +164,10 @@ ## 目录 - [EmoLLM-心理健康大模型](#emollm-心理健康大模型) - - [🎇最近更新](#最近更新) - - [🏆荣誉栏](#荣誉栏) - - [🎯路线图](#路线图) - - [🔗框架图](#框架图) + - [🎇最近更新](#最近更新) + - [🏆荣誉栏](#荣誉栏) + - [🎯路线图](#路线图) + - [🔗框架图](#框架图) - [目录](#目录) - [开发前的配置要求](#开发前的配置要求) - [**使用指南**](#使用指南) diff --git a/README_EN.md b/README_EN.md index 2ccccf3..e9ab047 100644 --- a/README_EN.md +++ b/README_EN.md @@ -60,7 +60,7 @@ | DeepSeek MoE_16B_chat | QLORA | [deepseek_moe_16b_chat_qlora_oasst1_e3.py](./xtuner_config/deepseek_moe_16b_chat_qlora_oasst1_e3.py) | | | Mixtral 8x7B_instruct | QLORA | [mixtral_8x7b_instruct_qlora_oasst1_e3.py](./xtuner_config/mixtral_8x7b_instruct_qlora_oasst1_e3.py) | | | LLaMA3_8b_instruct | QLORA | [aiwei_llama3_8b_instruct_qlora_e3.py](./xtuner_config/aiwei_llama3_8b_instruct_qlora_e3.py) | | -| LLaMA3_8b_instruct | QLORA | [llama3_8b_instruct_qlora_alpaca_e3_M.py](./xtuner_config/llama3_8b_instruct_qlora_alpaca_e3_M.py) |[OpenXLab](https://openxlab.org.cn/models/detail/chg0901/EmoLLM-Llama3-8B-Instruct2.0), [ModelScope](https://modelscope.cn/models/chg0901/EmoLLM-Llama3-8B-Instruct2.0/summary) | +| LLaMA3_8b_instruct | QLORA | [llama3_8b_instruct_qlora_alpaca_e3_M_ruozhi_scM.py](./xtuner_config/llama3_8b_instruct_qlora_alpaca_e3_M_ruozhi_scM.py) |[OpenXLab](https://openxlab.org.cn/models/detail/chg0901/EmoLLM-Llama3-8B-Instruct3.0), [ModelScope](https://modelscope.cn/models/chg0901/EmoLLM-Llama3-8B-Instruct3.0/summary) | | …… | …… | …… | …… | @@ -71,7 +71,7 @@ Everyone is welcome to contribute to this project ~ The Model aims to fully understand and promote the mental health of individuals, groups, and society. This model typically includes the following key components: -- Cognitive factors: Involving an individual's thought patterns, belief systems, cognitive biases, and problem-solving abilities. Cognitive factors significantly impact mental health as they affect how individuals interpret and respond to life events. 
+- Cognitive factors: Involving an individual's thought patterns, belief systems, cognitive biases, and problem-solving abilities. Cognitive factors significantly impact mental health as they affect how individuals interpret and respond to life events. - Emotional factors: Including emotion regulation, emotional expression, and emotional experiences. Emotional health is a crucial part of mental health, involving how individuals manage and express their emotions and how they recover from negative emotions. - Behavioral factors: Concerning an individual's behavior patterns, habits, and coping strategies. This includes stress management skills, social skills, and self-efficacy, which is the confidence in one's abilities. - Social environment: Comprising external factors such as family, work, community, and cultural background, which have direct and indirect impacts on an individual's mental health. @@ -100,47 +100,49 @@ The Model aims to fully understand and promote the mental health of individuals, -### Recent Updates - - [2024.4.20] [LLAMA3 fine-tuning guide](xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md) and based on [LLaMA3_8b_instruct's aiwei](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM-LLaMA3_8b_instruct_aiwei) open source -- [2023.4.14] Added [Quick Start](docs/quick_start_EN.md) and Nanny level tutorial [BabyEmoLLM](Baby_EmoLLM.ipynb) -- [2024.4.2] Uploaded at Huggingface [Old Mother Counsellor](https://huggingface.co/brycewang2018/EmoLLM-mother/tree/main) -- 【2024.3.25】 [Mother-like Therapist] is released on Huggingface (https://huggingface.co/brycewang2018/EmoLLM-mother/tree/main) -- 【2024.3.25】 [Daddy-like Boy-Friend] is released on Baidu Paddle-Paddle AI Studio Platform (https://aistudio.baidu.com/community/app/68787) -- 【2024.3.24】 The **InternLM2-Base-7B QLoRA fine-tuned model** has been released on the **OpenXLab** and **ModelScope** platforms. 
For more details, please refer to [**InternLM2-Base-7B QLoRA**](./xtuner_config/README_internlm2_7b_base_qlora.md). -- 【2024.3.12】 [aiwei] is released on Baidu Paddle-Paddle AI Studio Platform (https://aistudio.baidu.com/community/app/63335) -- 【2024.3.11】 **EmoLLM V2.0 is greatly improved in all scores compared to EmoLLM V1.0. Surpasses the performance of Role-playing ChatGPT on counseling tasks!** [Click to experience EmoLLM V2.0](https://openxlab.org.cn/apps/detail/Farewell1/EmoLLMV2.0), update [dataset statistics and details](./datasets/), [Roadmap](./assets/Roadmap_ZH.png) -- 【2024.3.9】 Add concurrency acceleration [QA pair generation](./scripts/qa_generation/), [RAG pipeline](./rag/) -- 【2024.3.3】 [Based on InternLM2-7B-chat full fine-tuned version EmoLLM V2.0 open sourced](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_internlm2_7b_full), need two A100*80G, update professional evaluation, see [evaluate](./evaluate/), update PaddleOCR-based PDF to txt tool scripts, see [scripts](./scripts/). -- 【2024.2.29】 Updated objective assessment calculations, see [evaluate](./evaluate/) for details. A series of datasets have also been updated, see [datasets](./datasets/) for details. -- 【2024.2.27】 Updated English README and a series of datasets (licking dogs and one-round dialogue) -- 【2024.2.23】The "Gentle Lady Psychologist Ai Wei" based on InternLM2_7B_chat_qlora was launched. 
[Click here to obtain the model weights](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_aiwei), [configuration file](xtuner_config/aiwei-internlm2_chat_7b_qlora.py), [online experience link](https://openxlab.org.cn/apps/detail/ajupyter/EmoLLM-aiwei) +## Recent Updates -- 【2024.2.23】Updated [several fine-tuning configurations](/xtuner_config/), added [data_pro.json](/datasets/data_pro.json) (more quantity, more comprehensive scenarios, richer content) and [aiwei.json](/datasets/aiwei.json) (dedicated to the gentle lady role-play, featuring Emoji expressions), the "Gentle Lady Psychologist Ai Wei" is coming soon. + +- [2024.05.04] [EmoLLM3.0 OpenXLab Demo](https://st-app-center-006861-9746-jlroxvg.openxlab.space/) based on LLaMA3_8b_instruct is available now ([restart link](https://openxlab.org.cn/apps/detail/chg0901/EmoLLM-Llama3-8B-Instruct3.0)), [LLAMA3 fine-tuning guide](xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md) is updated, LLaMA3_8b_instruct-8B QLoRA fine-tuning model EmoLLM3.0 weights are released on [**OpenXLab**](https://openxlab.org.cn/models/detail/chg0901/EmoLLM-Llama3-8B-Instruct3.0) and [**ModelScope**](https://modelscope.cn/models/chg0901/EmoLLM-Llama3-8B-Instruct3.0/summary) platforms +- [2024.04.20] [LLAMA3 fine-tuning guide](xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md) and based on [LLaMA3_8b_instruct's aiwei](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM-LLaMA3_8b_instruct_aiwei) open source +- [2024.04.14] Added [Quick Start](docs/quick_start_EN.md) and Nanny level tutorial [BabyEmoLLM](Baby_EmoLLM.ipynb) +- [2024.04.02] Uploaded at Huggingface [Old Mother Counsellor](https://huggingface.co/brycewang2018/EmoLLM-mother/tree/main) +- [2024.03.25] [Mother-like Therapist] is released on Huggingface (https://huggingface.co/brycewang2018/EmoLLM-mother/tree/main) +- [2024.03.25] [Daddy-like Boy-Friend] is released on Baidu Paddle-Paddle AI Studio Platform (https://aistudio.baidu.com/community/app/68787) 
+- [2024.03.24] The **InternLM2-Base-7B QLoRA fine-tuned model** has been released on the **OpenXLab** and **ModelScope** platforms. For more details, please refer to [**InternLM2-Base-7B QLoRA**](./xtuner_config/README_internlm2_7b_base_qlora.md). +- [2024.03.12] [aiwei] is released on Baidu Paddle-Paddle AI Studio Platform (https://aistudio.baidu.com/community/app/63335) +- [2024.03.11] **EmoLLM V2.0 is greatly improved in all scores compared to EmoLLM V1.0. Surpasses the performance of Role-playing ChatGPT on counseling tasks!** [Click to experience EmoLLM V2.0](https://openxlab.org.cn/apps/detail/Farewell1/EmoLLMV2.0), update [dataset statistics and details](./datasets/), [Roadmap](./assets/Roadmap_ZH.png) +- [2024.03.09] Add concurrency acceleration [QA pair generation](./scripts/qa_generation/), [RAG pipeline](./rag/) +- [2024.03.03] [Based on InternLM2-7B-chat full fine-tuned version EmoLLM V2.0 open sourced](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_internlm2_7b_full), need two A100*80G, update professional evaluation, see [evaluate](./evaluate/), update PaddleOCR-based PDF to txt tool scripts, see [scripts](./scripts/). +- [2024.02.29] Updated objective assessment calculations, see [evaluate](./evaluate/) for details. A series of datasets have also been updated, see [datasets](./datasets/) for details. +- [2024.02.27] Updated English README and a series of datasets (licking dogs and one-round dialogue) +- [2024.02.23]The "Gentle Lady Psychologist Ai Wei" based on InternLM2_7B_chat_qlora was launched. [Click here to obtain the model weights](https://openxlab.org.cn/models/detail/ajupyter/EmoLLM_aiwei), [configuration file](xtuner_config/aiwei-internlm2_chat_7b_qlora.py), [online experience link](https://openxlab.org.cn/apps/detail/ajupyter/EmoLLM-aiwei) -- 【2024.2.18】 The full fine-tuned version based on Qwen1_5-0_5B-Chat has been [open-sourced](https://www.modelscope.cn/models/aJupyter/EmoLLM_Qwen1_5-0_5B-Chat_full_sft/summary). 
Friends with limited computational resources can now dive in and explore it. +- [2024.02.23] Updated [several fine-tuning configurations](/xtuner_config/), added [data_pro.json](/datasets/data_pro.json) (more quantity, more comprehensive scenarios, richer content) and [aiwei.json](/datasets/aiwei.json) (dedicated to the gentle lady role-play, featuring Emoji expressions), the "Gentle Lady Psychologist Ai Wei" is coming soon. + +- [2024.02.18] The full fine-tuned version based on Qwen1_5-0_5B-Chat has been [open-sourced](https://www.modelscope.cn/models/aJupyter/EmoLLM_Qwen1_5-0_5B-Chat_full_sft/summary). Friends with limited computational resources can now dive in and explore it.

View More -- 【2024.2.6】 [Open-sourced based on the Qwen1_5-0_5B-Chat full-scale fine-tuned version](https://www.modelscope.cn/models/aJupyter/EmoLLM_Qwen1_5-0_5B-Chat_full_sft/summary), friends with limited computing power can start experimenting~ +- [2024.02.06] [Open-sourced based on the Qwen1_5-0_5B-Chat full-scale fine-tuned version](https://www.modelscope.cn/models/aJupyter/EmoLLM_Qwen1_5-0_5B-Chat_full_sft/summary), friends with limited computing power can start experimenting~

模型下载量

-- 【2024.2.5】 The project has been promoted by the official WeChat account NLP Engineering. Here's the [link](https://mp.weixin.qq.com/s/78lrRl2tlXEKUfElnkVx4A) to the article. Welcome everyone to follow!! 🥳🥳 +- [2024.02.05] The project has been promoted by the official WeChat account NLP Engineering. Here's the [link](https://mp.weixin.qq.com/s/78lrRl2tlXEKUfElnkVx4A) to the article. Welcome everyone to follow!! 🥳🥳

公众号二维码

-- 【2024.2.3】 [Project Vedio](https://www.bilibili.com/video/BV1N7421N76X/) at bilibili 😊 -- 【2024.1.27】 Complete data construction documentation, fine-tuning guide, deployment guide, Readme, and other related documents 👏 -- 【2024.1.25】 EmoLLM V1.0 has deployed online https://openxlab.org.cn/apps/detail/jujimeizuo/EmoLLM 😀 +- [2024.02.03] [Project Video](https://www.bilibili.com/video/BV1N7421N76X/) at bilibili 😊 +- [2024.01.27] Complete data construction documentation, fine-tuning guide, deployment guide, Readme, and other related documents 👏 +- [2024.01.25] EmoLLM V1.0 has been deployed online https://openxlab.org.cn/apps/detail/jujimeizuo/EmoLLM 😀
-### Honors +## Honors - The project won the ***the Innovation and Creativity Award*** in the **2024 Puyuan Large Model Series Challenge Spring Competition held by the Shanghai Artificial Intelligence Laboratory** @@ -149,10 +151,9 @@ The Model aims to fully understand and promote the mental health of individuals, Challenge Innovation and Creativity Award

- - The project has been promoted by the official WeChat account **NLP Engineering**. Here's the [link](https://mp.weixin.qq.com/s/78lrRl2tlXEKUfElnkVx4A). -### Roadmap +## Roadmap

@@ -162,9 +163,9 @@ The Model aims to fully understand and promote the mental health of individuals, ## Contents - [EmoLLM - Large Language Model for Mental Health](#emollm---large-language-model-for-mental-health) - - [Recent Updates](#recent-updates) - - [Honors](#honors) - - [Roadmap](#roadmap) + - [Recent Updates](#recent-updates) + - [Honors](#honors) + - [Roadmap](#roadmap) - [Contents](#contents) - [Pre-development Configuration Requirements.](#pre-development-configuration-requirements) - [**User Guide**](#user-guide) diff --git a/app.py b/app.py index f6d30fc..03099a0 100644 --- a/app.py +++ b/app.py @@ -1,335 +1,19 @@ -import copy import os -import warnings -from dataclasses import asdict, dataclass -from typing import Callable, List, Optional -import streamlit as st -import torch -from torch import nn -from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList -from transformers.utils import logging +os.system('streamlit run web_demo-Llama3.py --server.address=0.0.0.0 --server.port 7860') -from transformers import AutoTokenizer, AutoModelForCausalLM # isort: skip +model = "EmoLLM_aiwei" +# model = "EmoLLM_Model" +# model = "Llama3_Model" - -# warnings.filterwarnings("ignore") -logger = logging.get_logger(__name__) - - -@dataclass -class GenerationConfig: - # this config is used for chat to provide more diversity - max_length: int = 32768 - top_p: float = 0.8 - temperature: float = 0.8 - do_sample: bool = True - repetition_penalty: float = 1.005 - - -@torch.inference_mode() -def generate_interactive( - model, - tokenizer, - prompt, - generation_config: Optional[GenerationConfig] = None, - logits_processor: Optional[LogitsProcessorList] = None, - stopping_criteria: Optional[StoppingCriteriaList] = None, - prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, - additional_eos_token_id: Optional[int] = None, - **kwargs, -): - inputs = tokenizer([prompt], padding=True, return_tensors="pt") - input_length 
= len(inputs["input_ids"][0]) - for k, v in inputs.items(): - inputs[k] = v.cuda() - input_ids = inputs["input_ids"] - batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1] # noqa: F841 # pylint: disable=W0612 - if generation_config is None: - generation_config = model.generation_config - generation_config = copy.deepcopy(generation_config) - model_kwargs = generation_config.update(**kwargs) - bos_token_id, eos_token_id = ( # noqa: F841 # pylint: disable=W0612 - generation_config.bos_token_id, - generation_config.eos_token_id, - ) - if isinstance(eos_token_id, int): - eos_token_id = [eos_token_id] - if additional_eos_token_id is not None: - eos_token_id.append(additional_eos_token_id) - has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None - if has_default_max_length and generation_config.max_new_tokens is None: - warnings.warn( - f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. " - "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we" - " recommend using `max_new_tokens` to control the maximum length of the generation.", - UserWarning, - ) - elif generation_config.max_new_tokens is not None: - generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length - if not has_default_max_length: - logger.warn( # pylint: disable=W4902 - f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" - f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " - "Please refer to the documentation for more information. 
" - "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)", - UserWarning, - ) - - if input_ids_seq_length >= generation_config.max_length: - input_ids_string = "input_ids" - logger.warning( - f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" - f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" - " increasing `max_new_tokens`." - ) - - # 2. Set generation parameters if not already defined - logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() - stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() - - logits_processor = model._get_logits_processor( - generation_config=generation_config, - input_ids_seq_length=input_ids_seq_length, - encoder_input_ids=input_ids, - prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, - logits_processor=logits_processor, - ) - - stopping_criteria = model._get_stopping_criteria( - generation_config=generation_config, stopping_criteria=stopping_criteria - ) - logits_warper = model._get_logits_warper(generation_config) - - unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) - scores = None - while True: - model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs) - # forward pass to get next token - outputs = model( - **model_inputs, - return_dict=True, - output_attentions=False, - output_hidden_states=False, - ) - - next_token_logits = outputs.logits[:, -1, :] - - # pre-process distribution - next_token_scores = logits_processor(input_ids, next_token_logits) - next_token_scores = logits_warper(input_ids, next_token_scores) - - # sample - probs = nn.functional.softmax(next_token_scores, dim=-1) - if generation_config.do_sample: - next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) - else: - next_tokens = torch.argmax(probs, dim=-1) - - # update generated ids, model inputs, and length for next step - 
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) - model_kwargs = model._update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=False) - unfinished_sequences = unfinished_sequences.mul((min(next_tokens != i for i in eos_token_id)).long()) - - output_token_ids = input_ids[0].cpu().tolist() - output_token_ids = output_token_ids[input_length:] - for each_eos_token_id in eos_token_id: - if output_token_ids[-1] == each_eos_token_id: - output_token_ids = output_token_ids[:-1] - response = tokenizer.decode(output_token_ids) - - yield response - # stop when each sentence is finished, or if we exceed the maximum length - if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): - break - - -def on_btn_click(): - del st.session_state.messages - - -@st.cache_resource -def load_model(): - - # model_name0 = "./EmoLLM-Llama3-8B-Instruct3.0" - # print(model_name0) - - # print('pip install modelscope websockets') - # os.system(f'pip install modelscope websockets==11.0.3') - # from modelscope import snapshot_download - - # #模型下载 - # model_name = snapshot_download('chg0901/EmoLLM-Llama3-8B-Instruct3.0',cache_dir=model_name0) - # print(model_name) - - # model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16).eval() - # # model.eval() - # tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) - - base_path = './EmoLLM-Llama3-8B-Instruct3.0' - os.system(f'git clone https://code.openxlab.org.cn/chg0901/EmoLLM-Llama3-8B-Instruct3.0.git {base_path}') - os.system(f'cd {base_path} && git lfs pull') - - - model = AutoModelForCausalLM.from_pretrained(base_path, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16).eval() - # model.eval() - tokenizer = AutoTokenizer.from_pretrained(base_path, trust_remote_code=True) - - if tokenizer.pad_token is None: - tokenizer.pad_token = tokenizer.eos_token - - return model, tokenizer - - -def 
prepare_generation_config(): - with st.sidebar: - # 使用 Streamlit 的 markdown 函数添加 Markdown 文本 - st.image('assets/EmoLLM_logo_L.png', width=1, caption='EmoLLM Logo', use_column_width=True) - st.markdown("[访问 **EmoLLM** 官方repo: **SmartFlowAI/EmoLLM**](https://github.com/SmartFlowAI/EmoLLM)") - - max_length = st.slider("Max Length", min_value=8, max_value=32768, value=32768) - top_p = st.slider("Top P", 0.0, 1.0, 0.8, step=0.01) - temperature = st.slider("Temperature", 0.0, 1.0, 0.7, step=0.01) - st.button("Clear Chat History", on_click=on_btn_click) - - generation_config = GenerationConfig(max_length=max_length, top_p=top_p, temperature=temperature) - - return generation_config - - -user_prompt = '<|start_header_id|>user<|end_header_id|>\n\n{user}<|eot_id|>' -robot_prompt = '<|start_header_id|>assistant<|end_header_id|>\n\n{robot}<|eot_id|>' -cur_query_prompt = '<|start_header_id|>user<|end_header_id|>\n\n{user}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' - - -def combine_history(prompt): - messages = st.session_state.messages - meta_instruction = ( - "你是心理健康助手EmoLLM, 由EmoLLM团队打造, 是一个研究过无数具有心理健康问题的病人与心理健康医生对话的心理专家, 在心理方面拥有广博的知识储备和丰富的研究咨询经验。你旨在通过专业心理咨询, 协助来访者完成心理诊断。请充分利用专业心理学知识与咨询技术, 一步步帮助来访者解决心理问题。\n\n" - ) - total_prompt =f"<|start_header_id|>system<|end_header_id|>\n\n{meta_instruction}<|eot_id|>\n\n" - for message in messages: - cur_content = message["content"] - if message["role"] == "user": - cur_prompt = user_prompt.format(user=cur_content) - elif message["role"] == "robot": - cur_prompt = robot_prompt.format(robot=cur_content) - else: - raise RuntimeError - total_prompt += cur_prompt - total_prompt = total_prompt + cur_query_prompt.format(user=prompt) - return total_prompt - - -def main(): - - # torch.cuda.empty_cache() - print("load model begin.") - model, tokenizer = load_model() - print("load model end.") - - user_avator = "assets/user.png" - robot_avator = "assets/EmoLLM.png" - - st.title("EmoLLM Llama3心理咨询室V3.0") - - generation_config = 
prepare_generation_config() - - # Initialize chat history - if "messages" not in st.session_state: - st.session_state.messages = [] - - # Display chat messages from history on app rerun - for message in st.session_state.messages: - with st.chat_message(message["role"], avatar=message.get("avatar")): - st.markdown(message["content"]) - - # Accept user input - if prompt := st.chat_input("我在这里,准备好倾听你的心声了。"): - # Display user message in chat message container - with st.chat_message("user", avatar=user_avator): - st.markdown(prompt) - - real_prompt = combine_history(prompt) - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt, "avatar": user_avator}) - - with st.chat_message("robot", avatar=robot_avator): - message_placeholder = st.empty() - for cur_response in generate_interactive( - model=model, - tokenizer=tokenizer, - prompt=real_prompt, - additional_eos_token_id=128009, - **asdict(generation_config), - ): - # Display robot response in chat message container - message_placeholder.markdown(cur_response + "▌") - message_placeholder.markdown(cur_response) # pylint: disable=undefined-loop-variable - # Add robot response to chat history - st.session_state.messages.append( - { - "role": "robot", - "content": cur_response, # pylint: disable=undefined-loop-variable - "avatar": robot_avator, - } - ) - torch.cuda.empty_cache() - - -# if __name__ == '__main__': -# main() - - -# torch.cuda.empty_cache() -print("load model begin.") -model, tokenizer = load_model() -print("load model end.") - -user_avator = "assets/user.png" -robot_avator = "assets/EmoLLM.png" - -st.title("EmoLLM Llama3心理咨询室V3.0") - -generation_config = prepare_generation_config() - -# Initialize chat history -if "messages" not in st.session_state: - st.session_state.messages = [] - -# Display chat messages from history on app rerun -for message in st.session_state.messages: - with st.chat_message(message["role"], avatar=message.get("avatar")): - 
st.markdown(message["content"]) - -# Accept user input -if prompt := st.chat_input("我在这里,准备好倾听你的心声了。"): - # Display user message in chat message container - with st.chat_message("user", avatar=user_avator): - st.markdown(prompt) - - real_prompt = combine_history(prompt) - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt, "avatar": user_avator}) - - with st.chat_message("robot", avatar=robot_avator): - message_placeholder = st.empty() - for cur_response in generate_interactive( - model=model, - tokenizer=tokenizer, - prompt=real_prompt, - additional_eos_token_id=128009, - **asdict(generation_config), - ): - # Display robot response in chat message container - message_placeholder.markdown(cur_response + "▌") - message_placeholder.markdown(cur_response) # pylint: disable=undefined-loop-variable - # Add robot response to chat history - st.session_state.messages.append( - { - "role": "robot", - "content": cur_response, # pylint: disable=undefined-loop-variable - "avatar": robot_avator, - } - ) - torch.cuda.empty_cache() +if model == "EmoLLM_aiwei": + os.system("python download_model.py ajupyter/EmoLLM_aiwei") + os.system('streamlit run web_demo-aiwei.py --server.address=0.0.0.0 --server.port 7860') +elif model == "EmoLLM_Model": + os.system("python download_model.py jujimeizuo/EmoLLM_Model") + os.system('streamlit run web_internlm2.py --server.address=0.0.0.0 --server.port 7860') +elif model == "Llama3_Model": + os.system("python download_model.py chg0901/EmoLLM-Llama3-8B-Instruct3.0") + os.system('streamlit run web_demo_Llama3.py --server.address=0.0.0.0 --server.port 7860') +else: + print("Please select one model") diff --git a/app_bk.py b/app_bk.py deleted file mode 100644 index 41c903b..0000000 --- a/app_bk.py +++ /dev/null @@ -1,356 +0,0 @@ -# import os - -# os.system('streamlit run web_demo-Llama3.py --server.address=0.0.0.0 --server.port 7860') - -# # #model = "EmoLLM_aiwei" -# # # model = "EmoLLM_Model" -# 
# model = "Llama3_Model" - -# # if model == "EmoLLM_aiwei": -# # os.system("python download_model.py ajupyter/EmoLLM_aiwei") -# # os.system('streamlit run web_demo-aiwei.py --server.address=0.0.0.0 --server.port 7860') -# # elif model == "EmoLLM_Model": -# # os.system("python download_model.py jujimeizuo/EmoLLM_Model") -# # os.system('streamlit run web_internlm2.py --server.address=0.0.0.0 --server.port 7860') -# # elif model == "Llama3_Model": -# # os.system('streamlit run web_demo_Llama3.py --server.address=0.0.0.0 --server.port 7860') -# # else: -# # print("Please select one model") - - - -import copy -import os -import warnings -from dataclasses import asdict, dataclass -from typing import Callable, List, Optional - -import streamlit as st -import torch -from torch import nn -from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList -from transformers.utils import logging - -from transformers import AutoTokenizer, AutoModelForCausalLM # isort: skip - - -# warnings.filterwarnings("ignore") -logger = logging.get_logger(__name__) - - -@dataclass -class GenerationConfig: - # this config is used for chat to provide more diversity - max_length: int = 32768 - top_p: float = 0.8 - temperature: float = 0.8 - do_sample: bool = True - repetition_penalty: float = 1.005 - - -@torch.inference_mode() -def generate_interactive( - model, - tokenizer, - prompt, - generation_config: Optional[GenerationConfig] = None, - logits_processor: Optional[LogitsProcessorList] = None, - stopping_criteria: Optional[StoppingCriteriaList] = None, - prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, - additional_eos_token_id: Optional[int] = None, - **kwargs, -): - inputs = tokenizer([prompt], padding=True, return_tensors="pt") - input_length = len(inputs["input_ids"][0]) - for k, v in inputs.items(): - inputs[k] = v.cuda() - input_ids = inputs["input_ids"] - batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1] # 
noqa: F841 # pylint: disable=W0612 - if generation_config is None: - generation_config = model.generation_config - generation_config = copy.deepcopy(generation_config) - model_kwargs = generation_config.update(**kwargs) - bos_token_id, eos_token_id = ( # noqa: F841 # pylint: disable=W0612 - generation_config.bos_token_id, - generation_config.eos_token_id, - ) - if isinstance(eos_token_id, int): - eos_token_id = [eos_token_id] - if additional_eos_token_id is not None: - eos_token_id.append(additional_eos_token_id) - has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None - if has_default_max_length and generation_config.max_new_tokens is None: - warnings.warn( - f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. " - "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we" - " recommend using `max_new_tokens` to control the maximum length of the generation.", - UserWarning, - ) - elif generation_config.max_new_tokens is not None: - generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length - if not has_default_max_length: - logger.warn( # pylint: disable=W4902 - f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" - f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " - "Please refer to the documentation for more information. " - "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)", - UserWarning, - ) - - if input_ids_seq_length >= generation_config.max_length: - input_ids_string = "input_ids" - logger.warning( - f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" - f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" - " increasing `max_new_tokens`." - ) - - # 2. 
Set generation parameters if not already defined - logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() - stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() - - logits_processor = model._get_logits_processor( - generation_config=generation_config, - input_ids_seq_length=input_ids_seq_length, - encoder_input_ids=input_ids, - prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, - logits_processor=logits_processor, - ) - - stopping_criteria = model._get_stopping_criteria( - generation_config=generation_config, stopping_criteria=stopping_criteria - ) - logits_warper = model._get_logits_warper(generation_config) - - unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) - scores = None - while True: - model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs) - # forward pass to get next token - outputs = model( - **model_inputs, - return_dict=True, - output_attentions=False, - output_hidden_states=False, - ) - - next_token_logits = outputs.logits[:, -1, :] - - # pre-process distribution - next_token_scores = logits_processor(input_ids, next_token_logits) - next_token_scores = logits_warper(input_ids, next_token_scores) - - # sample - probs = nn.functional.softmax(next_token_scores, dim=-1) - if generation_config.do_sample: - next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) - else: - next_tokens = torch.argmax(probs, dim=-1) - - # update generated ids, model inputs, and length for next step - input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) - model_kwargs = model._update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=False) - unfinished_sequences = unfinished_sequences.mul((min(next_tokens != i for i in eos_token_id)).long()) - - output_token_ids = input_ids[0].cpu().tolist() - output_token_ids = output_token_ids[input_length:] - for each_eos_token_id in eos_token_id: - if output_token_ids[-1] == 
each_eos_token_id: - output_token_ids = output_token_ids[:-1] - response = tokenizer.decode(output_token_ids) - - yield response - # stop when each sentence is finished, or if we exceed the maximum length - if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): - break - - -def on_btn_click(): - del st.session_state.messages - - -@st.cache_resource -def load_model(): - - # model_name0 = "./EmoLLM-Llama3-8B-Instruct3.0" - # print(model_name0) - - # print('pip install modelscope websockets') - # os.system(f'pip install modelscope websockets==11.0.3') - # from modelscope import snapshot_download - - # #模型下载 - # model_name = snapshot_download('chg0901/EmoLLM-Llama3-8B-Instruct3.0',cache_dir=model_name0) - # print(model_name) - - # model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16).eval() - # # model.eval() - # tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) - - base_path = './EmoLLM-Llama3-8B-Instruct3.0' - os.system(f'git clone https://code.openxlab.org.cn/chg0901/EmoLLM-Llama3-8B-Instruct3.0.git {base_path}') - os.system(f'cd {base_path} && git lfs pull') - - - model = AutoModelForCausalLM.from_pretrained(base_path, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16).eval() - # model.eval() - tokenizer = AutoTokenizer.from_pretrained(base_path, trust_remote_code=True) - - if tokenizer.pad_token is None: - tokenizer.pad_token = tokenizer.eos_token - - return model, tokenizer - - -def prepare_generation_config(): - with st.sidebar: - # 使用 Streamlit 的 markdown 函数添加 Markdown 文本 - st.image('assets/EmoLLM_logo_L.png', width=1, caption='EmoLLM Logo', use_column_width=True) - st.markdown("[访问 **EmoLLM** 官方repo: **SmartFlowAI/EmoLLM**](https://github.com/SmartFlowAI/EmoLLM)") - - max_length = st.slider("Max Length", min_value=8, max_value=32768, value=32768) - top_p = st.slider("Top P", 0.0, 1.0, 0.8, step=0.01) - temperature = 
st.slider("Temperature", 0.0, 1.0, 0.7, step=0.01) - st.button("Clear Chat History", on_click=on_btn_click) - - generation_config = GenerationConfig(max_length=max_length, top_p=top_p, temperature=temperature) - - return generation_config - - -user_prompt = '<|start_header_id|>user<|end_header_id|>\n\n{user}<|eot_id|>' -robot_prompt = '<|start_header_id|>assistant<|end_header_id|>\n\n{robot}<|eot_id|>' -cur_query_prompt = '<|start_header_id|>user<|end_header_id|>\n\n{user}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' - - -def combine_history(prompt): - messages = st.session_state.messages - meta_instruction = ( - "你是心理健康助手EmoLLM, 由EmoLLM团队打造, 是一个研究过无数具有心理健康问题的病人与心理健康医生对话的心理专家, 在心理方面拥有广博的知识储备和丰富的研究咨询经验。你旨在通过专业心理咨询, 协助来访者完成心理诊断。请充分利用专业心理学知识与咨询技术, 一步步帮助来访者解决心理问题。\n\n" - ) - total_prompt =f"<|start_header_id|>system<|end_header_id|>\n\n{meta_instruction}<|eot_id|>\n\n" - for message in messages: - cur_content = message["content"] - if message["role"] == "user": - cur_prompt = user_prompt.format(user=cur_content) - elif message["role"] == "robot": - cur_prompt = robot_prompt.format(robot=cur_content) - else: - raise RuntimeError - total_prompt += cur_prompt - total_prompt = total_prompt + cur_query_prompt.format(user=prompt) - return total_prompt - - -def main(): - - # torch.cuda.empty_cache() - print("load model begin.") - model, tokenizer = load_model() - print("load model end.") - - user_avator = "assets/user.png" - robot_avator = "assets/EmoLLM.png" - - st.title("EmoLLM Llama3心理咨询室V3.0") - - generation_config = prepare_generation_config() - - # Initialize chat history - if "messages" not in st.session_state: - st.session_state.messages = [] - - # Display chat messages from history on app rerun - for message in st.session_state.messages: - with st.chat_message(message["role"], avatar=message.get("avatar")): - st.markdown(message["content"]) - - # Accept user input - if prompt := st.chat_input("我在这里,准备好倾听你的心声了。"): - # Display user message in chat 
message container - with st.chat_message("user", avatar=user_avator): - st.markdown(prompt) - - real_prompt = combine_history(prompt) - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt, "avatar": user_avator}) - - with st.chat_message("robot", avatar=robot_avator): - message_placeholder = st.empty() - for cur_response in generate_interactive( - model=model, - tokenizer=tokenizer, - prompt=real_prompt, - additional_eos_token_id=128009, - **asdict(generation_config), - ): - # Display robot response in chat message container - message_placeholder.markdown(cur_response + "▌") - message_placeholder.markdown(cur_response) # pylint: disable=undefined-loop-variable - # Add robot response to chat history - st.session_state.messages.append( - { - "role": "robot", - "content": cur_response, # pylint: disable=undefined-loop-variable - "avatar": robot_avator, - } - ) - torch.cuda.empty_cache() - - -# if __name__ == '__main__': -# main() - - -# torch.cuda.empty_cache() -print("load model begin.") -model, tokenizer = load_model() -print("load model end.") - -user_avator = "assets/user.png" -robot_avator = "assets/EmoLLM.png" - -st.title("EmoLLM Llama3心理咨询室V3.0") - -generation_config = prepare_generation_config() - -# Initialize chat history -if "messages" not in st.session_state: - st.session_state.messages = [] - -# Display chat messages from history on app rerun -for message in st.session_state.messages: - with st.chat_message(message["role"], avatar=message.get("avatar")): - st.markdown(message["content"]) - -# Accept user input -if prompt := st.chat_input("我在这里,准备好倾听你的心声了。"): - # Display user message in chat message container - with st.chat_message("user", avatar=user_avator): - st.markdown(prompt) - - real_prompt = combine_history(prompt) - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt, "avatar": user_avator}) - - with st.chat_message("robot", 
avatar=robot_avator): - message_placeholder = st.empty() - for cur_response in generate_interactive( - model=model, - tokenizer=tokenizer, - prompt=real_prompt, - additional_eos_token_id=128009, - **asdict(generation_config), - ): - # Display robot response in chat message container - message_placeholder.markdown(cur_response + "▌") - message_placeholder.markdown(cur_response) # pylint: disable=undefined-loop-variable - # Add robot response to chat history - st.session_state.messages.append( - { - "role": "robot", - "content": cur_response, # pylint: disable=undefined-loop-variable - "avatar": robot_avator, - } - ) - torch.cuda.empty_cache() diff --git a/app_web_demo-Llama3.py b/app_web_demo-Llama3.py index 09421c3..bd5a6a8 100644 --- a/app_web_demo-Llama3.py +++ b/app_web_demo-Llama3.py @@ -151,11 +151,13 @@ def on_btn_click(): @st.cache_resource def load_model(): - # model_name0 = "./EmoLLM-Llama3-8B-Instruct3.0" - # print(model_name0) - print('pip install modelscope websockets') os.system(f'pip install modelscope websockets==11.0.3') + + ######## old model downloading method with modelscope ######## + # model_name0 = "./EmoLLM-Llama3-8B-Instruct3.0" + # print(model_name0) + # from modelscope import snapshot_download # #模型下载 @@ -166,11 +168,11 @@ def load_model(): # # model.eval() # tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) + ######## new model downloading method with openxlab ######## base_path = './EmoLLM-Llama3-8B-Instruct3.0' os.system(f'git clone https://code.openxlab.org.cn/chg0901/EmoLLM-Llama3-8B-Instruct3.0.git {base_path}') os.system(f'cd {base_path} && git lfs pull') - model = AutoModelForCausalLM.from_pretrained(base_path, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16).eval() # model.eval() tokenizer = AutoTokenizer.from_pretrained(base_path, trust_remote_code=True) diff --git a/assets/new_openxlab_app_demo.png b/assets/new_openxlab_app_demo.png new file mode 100644 index 
0000000000000000000000000000000000000000..3612f3425331c6a2fc877bdf03f08d97536ce42e GIT binary patch literal 46372 zcmd?QRa{)b^Cvn10t5mC2n2Tt?m9?t5AFj5_aV4T2*D+|L$JXIcMB4n!F7<}GPpZ8 zyLT|K6cy`F}a zj-FfHlvvWa@s#1CoSv1n=eHC{&D_~PMr8kF{C7$JCFb7;LVEp=6&m`#ULY>u6*BUx z*QltkP+lRw{wMEW9R=w%E;TBbgt{pnzH|KK>HBYaHQnoI1T@@dl7zHo8ZN>4wHq%> zfVcm~hlGoa3lIl9+)7KM?I0?7ARzDlB`1@x9p{xRCzSx9ey%gP-zLb&JRTs5x(FA}tXQV!n6%2oAdmt1c>0hjhg>I(YT zmu>nl`22TBT=E0>t9ZxPV-7en5RQIJ+ombcR`>fC`__Z!_fv*O>0iAJAN)V2LMzc%Vpsb(lA|9cytlK6@Irj*b(n7^s$#!g zPAlS1&wYi?<(|a}}BB4Uy&7AglZ%V6m zHf=c3H#8|CNhr!bNE ziNW)n%{`J`(LEYdEE>srjV|v@rf>!t1g}GG8I}&6(9X8(nlA4MJ~=G?ajY$deA=IH z{p&dEGtumc+B0c`xTx2T=wMSZ%UjQo@o}(?>2e?A+XCQs z25~QoRSG!!SnQl9*jQ?c>IvKPEb4NE!u@kp^|+to^lYH4t%*yA0d7jc-sEq= z>STP%*Q=@7RBb5~&L@SgewJNBSx1pcG|;k=>?}lN;8w(#^Tj?S&(1h=Qs_i^TfhQA z`J55h^Y8+w9OHNaOmzsn0QL@?*3O8}NJTw}JsI~AgJ*uc;mSQnd^M2T;J`{s&@>soJDZ-?}+)jcb5O{|H< zA`4yO`Y*hdg_?Id=z1FsMaH}iLhRy8%h{yd22O&W7v>~8M(c#|O7T2$gdzp4{^*kmk1evU-h2|Wm$`#IYKiKQ20q>t9|3bUFxH_+kQLFgt1rt+Ht$$ zw$A<)l#^k15aRsVGqXJ>kYlier)VDXiQ6_J1+@Tqt`xQ0x?1`N8vnT^X@Z^M=dQFk zMR)dyeo@bZz}}I<+uqZ`{Z9AYOXyQW!qsAVr$L!|%XVZ#y~>}a2!G_H@Sk`NfFEYV zeKDhWb11|>At=8H>1euNmjw7JJ#p z8Vjom?~%I}>JH z=Dvf^Y+N!i4*RJv^C$no2? 
z*@20iQb+fG#OEG~BL#Y^Uj;~M7$$RM8~fI~3Gc8A9Ir7vbN`WLvM6))ZBj?P03wgb z#u!64ICnEjr{Yeb)b3VjtWGxGzB=I7%-3j*ad$CuW?Y1%nOfjvX^>W1bH0=GO8%KP zqHUM3NTxsY>1_M2|LEPrB*=57eF?{CU1yhl`7&P9lsem^!LbzBVz7=TLlU|NtR`6r z<@qVkdFXhZ#FVlES3d4PZaQ7#G26*qmrz;F+%kYl25)tHC-;@IM-69h%|F8@smUsg zxhEIrFmVH(5!7B zv3E2yKhVR2XlMZ8;Yc)8|EEc8j%VRQm9(=G3Tgv1GfvOR?9Dir@YwH|B1z7Z5V$E( z*1FK-hI}xie&yQN_5PUXEGwVGEXA_{ofx^x05cs}nPBMgcBsb*~tddwrxWAu=p}w13(!&Nv=50^g0uX{9ghKR!er z4d3}&bcxgt{fg(YAw$EB<^!5h4~m^$8bK%TxI2 zqjtKPWjxaKQhHV`>*_M!0sZSobyrOX(jU*&hVN9LP$^V?5II(Ouo@qDxI3u`sXcdF zWf6729)6{73NbC$Qx?mo^^5Z=jy7>KPaN~$<2X^s3XCuLol*_+)SH?9z>TZBA6ksq z%p0|~72foBkFV1pc2(|zlDU$!Wd~Ssb>8+b^uzrE^=V{A!L;1{aY-|1i^;D`IXA_Y znYhlfj?HPO3oTVcG;71n~&jsYK`%$pxu#`x@7l&8{GVuAD8r{p2mnO8E# zqGI}Df;O6MW0}hoWa9=cKKzfDG%GqqTH{|Id4yg7;f=w9*4eGf>VR_3vIvbGj!Mb6 zMq25@(KU(nv^BcCrm(Cv;zbeB+eG_Ozp#xpyt|3Bw-1y~axrS=1OEu;NQn zvP}54LMkp9>TNjQ=Y*SwSL0pL0hsWv3aiZ_%af{o?RobCDdHu9zz+HCUa zQ4K6Ao^kDSJ+lxHSnfL1n2s}6Aq`#w)uzsDJNfC~&8B>w{^it|?y2&sdhP}A_HYQR zVcO~@$Y%}m(OM`_0t>%wMoE!Lwfa*mH(wubI{s=BzZ6@a6%a(G6bGbrtX_?=`HTub zYUG!!i$?6K-2f)+<-ex1>ud2|$+Hn9|CF_=MbJ_AW zeZs?kD$BYQSbI5y)#Gq0yqfqsns{E&)(OW*6b(Itc5$r)mnKbMy!M zdCb~R;tTgUxuRH9bi;%Al@u7hA?jQ5@7Ixjhf)rDyY|h+s@|4)kjB|mp?&lntFeDV zFU~CMGNV4|>b>mnNs&2(QSQ08>xpbGq=+@UScVi%+-^%z4oGHP>)m_14{8{X+Q#-& zu`RAM87F!;vSKT5Td#*-kIzrOo@SefRXftMdjW84>z=T? 
zf9Q8n#MJ%iCi=i=CiHe(tacG}`5wVK1qzLlO*LDiwsTfcVxY9$`65$$iPLBIpnE6^ zZdt8J=JrcXxdRR4zkz3+RTcYLJ4Q;Yl>QQ|s?okImTyvRz{r5zH=qm(;+hWguziae z?L8Em+s-dkuIgAV-Rz~UPao*Gy?bnO?p{JM;jIb8#qrU94D>tzEjjVl;Jx1J%sT+b zO+tScsee9?5f<$~=>kAph=N3g z@!h(ispH1R{BK=Jr$q#vHgH|XJv;+E3F$uwH{%rgrv3u*J$kjFWtH)QwP(P~_g6j9 zn4O<5Cw?|0@bn_JCvjzZ**yi&PYzBT&Q6ZIoadv@M*LZ#AG=HQ6z;x^(|suG9lC!3 zKsLW!kgp}W2 zKi7YT>SPX|$s7ZRi|EZ=DtE>-EsS(Y8X=2x4jhuB&Npob9p3X$^>&&%@0q zWFkEBKZC6PhlbdV+u+(Ulq)M~=gCDzUH>KNPb%4TDM#{SNV%0Hgi1U6I|FH@$KS?S zPg@t@#jmWaVUF4$9>@2>%F zbI;K#$Td>F$^Cv!sOcPvY*N;4Y-&v%vV}*rl}AJzz-wS|sFN0ri*L1kwUpP85hzrBD5NKRtq*Jl(Cw5VYEZif`ecLgZE6Okj8D`y>^;x-^T-(YbR(wT z@o8ID_!l>^l{p}fnFEsj*Ix;ghOR}In0p7+&C|~GSr`Sv%*{KMJoPqE&QYWKTJ-#x z@_cBzc7MC`o|d#Kt91kn`PuDKPUq5z{veyMWrrdVKWf!NPv#1yqxp;YICa>1f|Rm~ zgA4tL0m@LS0W3wwV^hm*zcj5~{+<2kj;EIe>UD$tl6>}sr%<*Q{?=+QYHKpW4;=d)rG?oSAyYjD&v)CsDAW`F)nwf z-oQxQtluLlepS8nBlR$RcYT_9e(|0|0=kGaVHWc@z%lPG<1eAUtr32LI|abJawNhc zo1ohnb+{}Z$M+qeWDF<%34{yhbj{E2cVU@o6&kyk6sR8lvHuipZKeE$Zoi_qvgRE8AHQBF{;v-7|O+Tw1}&%yWKWI3Hq?s6&UoNRRc5? 
zs|A>oFHUN`7v&A%AH9mE4XCl2p#xFM% z=e+Vq{7Tz`E;I*Zo#bAx&*Ro_hWIEk_`HK(y*CZk0eIvD@TX1Sy4xP=vT{rxZM5o7 z{V07h|S5;~|g&KkpJ$BtE zjl$YqLn*d%=}mw-PH8QLucQ8R?#Ss!WD#j`+{)GR$+D9jMGBYlFV&-`6wB1i91rk&w@0SITi?0K+& z$Yo4O+HhE4=&si?t5SP|!8IT2?Ua(w&NlhRw=@M}M_LV}Kk0fuJCw@rhU_7;2@KCf zkh71^Bj$h81@U36@5~kL|7?dRFqOb zR-9;)yTS2^jhn&Wld&shJucuW&+x7(tu$*en|Ihm zdC7J;rZU`iz<>u?IoLwe%rqwSVv;O|t;5`(-%U5SG@tfd@GN4nX*!yuF}r;GF28Zr zukEM3Aa_jeh^awe$*6QgybJ30hW7{!;vel{MZECCp{&U_Ave>`M$9>;yU8Du(Q&N} zB6=Wa<&1BC5S3E5%*1d{Ni)&ZVzlyd4v~_N1m)LIe4P*d=i!}q9X_oshHVZm=jOBw zm+&12-Y=~LM(9Rd*D#*EohGj4$L3$Op_1#8nBMzs5@t+Xp;llCMmypDC21zH5gIMT zSy)Q&uKDKV_ka4A`%eP1|M}de@3Ee9hfC&;lP5m^sZcLT-k5814E0Db3McHu#_z+x z^-pt9NV|?V<3x1P(AnbU!kX=XIHH{~N3RV;i+AikFFYAd^Fg_)_#oFzJR>t4)jzOz z1mGx&9pIfWy|a!oKX6(iD)=3Hrm00WzlDt%5~CB+)xK3<>#yyCJFFT&V|Jy>z6$Ad zm}Dznf_TQ2CmM2dHjeZ{xPQF~Ki&C5O|%{BCbB4tH^qx5VcLeOwJaT=g-0T;xs?s0 z+`t^T?7F96LycK&KbvdebGv!9k+oEiQgG303jN!nvTRatfw1dn*=Ruau&pAh&=DHZ zbb~6yRL9UL&SC1)FyOkD?W(?YGuqG1BXI8*^4F!fKkc3@U#q>P1n#-P^t_tnncVh% z8P=<HfaB>^hIgJE&C`)M#3U9>VHVONJIQ0mZ)O;$9PN!o3FWl=)=J~_9wy%3#^&zl_lS?Mp$+Zq#xyxJwJ#syb1TnVE7qR_|h7IH^U147hw>pS(ohdS^Zu7yN zVua_vlwN!>N?%QS`#09Q+ncfwk%Fl!`9L5sSqQaqp5f4eC^b_+8LD1Geu(d!vpF@p z1W+CceaLCXPT%c65WOvCIgcqG_k5-cC?5Z#iyjp>d#4@PaGkgk6?dK)1CgK{8gp7_ zP5!2#=a@m45zTZLes~DUfVl0(IESbRKwP^hx0P ze-EgaIKwPSGR=P8;LINT4?pg4mxD?hZp9ahL?@1`?P(w29lkkkdS||D={3|=4r2;A zb>k;L?B*Snjd4KTb6lM@0hQvIx6f|&NhT&2TkCjcitYOiP%@nroj0rszOozs` z&}Duh_Orr2bJFqp&5GIH^E;nsR~B%-OQ2Ch4;2u*;kqH_sr1E*xs*N%|9e!S2mTMzK2llf0UrnM8Ty;1*N)N;85)uG1tu= zS35NV`FQ`@i&_X#OfG@8H&`3?&S_C<+9l_Puwn)^-i`g5Z zz?=rFSPD}O4$RfxlZ=1O%#UQeAs%;Lvu3xnBN-bHHA~w6_RoHj)m?I5lld|EW>aqV zxEsxxr|oxEqwf$S*c?m98`1hYq_RHAKZ3{c&!@{rH2t%g;StF@o#t-_V}x}ncF6j;+?oveR}8Pk39;S{;9G7&+5HBR|DYGh!w!Kpwl5 z5?B{rLQhXDie#gBX*S25_Q}|A+K*P^e;l0#1aL+0eNsP@m_h}A$mF5f_q%m(@ zJZr}Lc|oxv;2XFgiAfvdGA)x*NPlUF>=+>7-V2eIW*4qQ`IGSJXGm$PWZmWS4C&1M z1g1~$)iBO!ZEM&s_Y>~i4Y;WA8l?!`lR%A4nP-R(4m z#a5yA5cqx=7v#48cHaN0e*wk^cktuM`FsWbAdfrgzoCbg^8#QrIPCo5z9c7I^3`Qp 
z0nYz~WBSIo${adpTZ<;$L{SrvG8)NcZ$TO}U_%>)gK9<r=J6?P=7~W}QW2yw6|92s&=P_U1`X5568- z=%dS4`Zyb6>!1JIGW9uoBKSNu&~cyeV-dr5$m=AxLbZy%1pV8eXEE7eWPC_$=wWtE zqnL35ql~@K?Xz`*VXyCc|D~%*ET5~2XFt#B?Ix)?srlw9%=zrr`sR@*%P(-8C2nP= z`vo9@d}RGERYP(K`kC;^=Uj;8a6`Gj9I$BBrreI2@p|?8_^x3Ng0pnx$#&096^?m_FEenpl%JK6Ow@Abp4;jM6KVDENt|K+0_U686GhMi|Js_pqCMOXnb9W;R!4rn_n=~0UWd` zsA92wy-t^tdV3}6$!=zDp)Ck2*32AA0xyC4YMbP4d>2WgV4Mh1GuaC}KIJj;P!@7J zpvhyHkoF&R%Ui=A(U@qxWnvaqxS*zTBs^6SKG?UP{kg4fzrYj-;&6Y5tLzHe4J3 zQId+9waNOcTPEG$D=R$m`Y?+>X)S>e?-N78Kj0RvODd$}NUz2hKm*Rld*9ga(KCKr zSMSzZu8d_=3$~z{gR_k?1GVD(0fvFjF)CI-*#1Dk*zby`S{d`IsmDaE6J!gwKuNdi z6ytBZCY+wF-wys9ENeOPTr|yh)NKJD1zEpQT=mI2#5SwHv6|x^Xt%YNL3dN$y*1JF zVe_um84-^Yq9i2_+zMiRo^9bs5~ZIp7ghS5(lp3ddN^N*&kiKb@zlae3Pv*XRa&0Cu+AI+hD> z299I}rg*@X_Rj`xtcl=4{mxm|>&Y6)<_ag8gDb`qBnE*#rb4lFaOX!z2IWfrS47T{w&BPbkPk{78t zcb>-aL!~rCToc|k_P1$4zliyK>)bFndv>i8(hI8-sHV(oh`!bk5`C2yAdyXsUJxr} z57y_TP$o7wsTZoTib)T>hQ<0beY}glc3&eJ-+~KU3nRLEh35SmU|h);VYjlPbAFx{ zEndKaDY-0X*!)Cy2HMk9_l+XX67b z+q!4ohn|7f_xQQb@3Ty@$*q<+N~fGYAntUj1j+NJydQq#C4GNq`{uK)O2wR0;$ce5 z&Oh`W5+wPa%pTr8*VkECvR)93_SQi}G@N$*69L)*g^Sc(I6Hq0(c(UiwY3HCj;|jo zb_^FpWSn%~!4_dIDdClq+VT-b`XC1Uk7e1(J+7bUwfoz?3+rmolucz=>I$0N(QwEN zWMmi>Pk^kd>{!Muu0ET0nSKM&TSg4z?z!t3eayoByQAmsAOgd z_LI~jfX@YJc-Zj#W*@4$hPXO9n1_1P<`4hUJkd2)G1+r(vz*ToaKvD3`^rW=AWpXT zL?fsp!p4|_M5&*7IFj+l+?IqV94AYw^$#JA%tmPYquoeRRGz2OC8e}fgNvLk8sA=( zp#xujDAk8H$wS;$WqE7V3!zD87$vkXRH!M>*cCLHLOh{=o-1dcc}J6{S^0Wbhr)X+ zr@PIKTKas5Lz~IBO?4IR70C-}%?Q&+(@dt#r z1dqdEAl=AO8{DNCST`Ptc&mRhk7m_n|J_JcI>IF$Y3+3;T~YoM5@_$~F_pY5%3_?M z$^VzG{?BDJGE2NxOT&~EVPL)BA;Q9rC?6DQ>$scG$DkF)SetBESVz8kRAvJybujuy z@$+IO(mzPWzU3T~tE{k~;P4m1h#|x8FSm9$-J69U$=GzGwVaB`6m*3>1s45w4(Pp* z4yKgM$mg-SngBTadqMb(F_0~Oz2Qw&qKYX8i= zMD@R@zz=8x2-#(yrm$XY08p7G64VNoZRDhxbw@mNZCtO3JpUe@@=^^fRE-z=GsK!$ z4?P;_cHJ)&iXvOxc44Wai}VYJG^YDn=Mtr9Xv@S+J}AuOY%o#;jZZgj9guIv35-F%3Dp?|bmFJNS&a~e{-g?qQM@9W z?}UC}%(E0|t316!5U>(R{=qa-owEq8rx?;r`=(@YrjU~|FU@7N?awhjDR)}V$he&x 
zC~@;fG-sB5;7j#%rQTyL%C(aeVCT_+qW!tSq-xeq;N#t#guMdmh2!uY^J`2TxQ=VF*G^^A}lv0^VI;TeA7Oi-z+p+AhJ}0~D1kvHB2KGU# z+biztvM5Wp&J%ANos!tBl>Iif!xYkFt>KTQbEQ$*9@|inMmv4pWr~&#qnw#T#k@+a z7AHsjx>+WIRv?c?Yqmb+cy`O`krbC_wq&)-?)SBOazabRRe6+$ck?!c!F;87*ll`L zW^EX59Rp76pU$jfd&#q(Gc`5OSI0l%ag_TK?A`Fa>TlK+5xG(RbqTIuaMC;>6tl%7 z+bQxzRxinl$w&np;#sP8h1Z5Rb2BKLW(1M5$aBl9X8(ix6|zl!1EHt9XWtWDckX}#A--#AYuO}XDGi4wQA#^?QD21lIgPh%H@Zgj0Qz96h zx{YVwcl?wn=Rm;`z2L_^cSS039*PJ_arQ}Aks4QJPV61a)ABrQy za`A|W%!)TpCb#I&gmGVMKtEhH{<-zE^0Uxk;{_eqkrNDgDDT&_T~>xVeh@AYBEkIJ zL6cG$OOFL2@oWWWd~}J*l4?FL%d=Yn=3lYP$?x;Op+e%3rk=KsPDuw>OHbbNwJvKy zb#e3xf4c5|L2cDVO9bL}(AQG%!yJh#I(VvGKF3f@a^zgGyZc4RF|yLnsc@g@*UOa< ze=CTE;o5i+3ysYW^*9!TQUQ&V^sTbZ^mY z`A>vym7w=|94JIByha;X3e}!$rT=-tT`NmA$=z#jbwDShDgQp+I6>%_jy~?+1BqtW zzXBx%mL9k}1$;U)+8C}gODUSOO7-H7VOsSIyHIjCuR@vBOmDHx32v#SRi&0t?KFjl zX2&Ijtz)T*RB{D8my#49ipfR{PGM+by0mn1lReVal%KY{hf}p_+frhxtYmz&iajc( zALlnQI`b5aGd%)X=bgU*K7HD3ki6#k?j5IIrf<6OW!*)w4_)g@`$3a$pPo4K*t~@u zB`iP(DmEiTBG6bSaqwh+ZE9Gc7pVGlah0pQ?T6SGLtKwHK2Ng+|2szGbsyLG^M1V6 ze;U!bc24s&vgI!@|LLwvZG{l=INs=kj^oh$_To=d3~}<7?+XB#+A*h&_c6vSxX8Jyh$j97LuLS@PxN{+u@&zYZXF><06Nnf^aq_ACl zeDe9jJq|vH6`ue~J-La7gMcl^+JvwqM^G|E_A$CJ*Di^#J6#S(MQUY#AT++F0Ct-t)M8Wq@g_f z)V8>i*I!{RyRmj2Ra8OXLziJx$Hn|M>6lLmJU)Z1mpuh8G@3l2`fMrpPbS_~8JNG) ze^9l>$Ui<-uiedxisNMY-~`>>W3l@e=33L(mSZIcx4!)_+SgZUprW&jXYHKW$S=Dk znfrz4Jqx$YNq8Q1st4GRv``_QQEEp3M|0Wx5-~uymz9zjtG{>b>h#06=IoB}x26;8 zc}Hn+|8BCoSQgXRY8p!6=~Ui_L{9a0B<)t{N0g-XPc#v`oey-o*auGzYWXWMF97+S z^#%G+PU1;zV@J7-Y|R97OczgJ`VC5r<=!DhpX@^akc!?<9}5y z)2cFORLSwK(b{%%rE<&>*@dLVkuJ3{o!`t=^>+?Ji@uc=gFInI;r%E7YY)?~^u!l{ zn)n%+3p-E&w5TFwW%9erqaKK@;<2;@Wh*UXqnzEGTMnPmDQDW}Q=4^a(h7yU9ByOt zJ#lp&Oldz!jHBxuk)z&y$v+P;Cp>a3n_%}`4Z-W2z10;NT_Ah_o9hw12D~eQbQ`#^ zHO7#y`YW^W-$BEprD*d`em~5=tvdwj=V>5v;Z zT(4K*HWLm@y`;YjR(3s0dX9wr6TC+Ul!6;HOoiEz6JoiLlx^@s|*FC3=m3unxsxGkd%+No?y+5NfozQkb7N+az>#@f}Pd&@8f0A z_IdYEdG`0jY(#RnCM0`%LReHo`;?TEG^#!Ikl-Uqv3{^*RQl#~tHtl;Ia1gB=ABHx 
zYhF*o#CTC)e6jG3TPldWM1YT3;jWCE%Ij@v=_wEOgFlU$9=M5ZCox$ij0U zd2QQvyFDt;u|I#kh%U#KW7Fhv#=L8h+k|_7_w0jd-B(wELCMEk{w{FA74x>&4V_i& z<(g_1tzC*vqwG=FzmzGrgIpxhQpJo%0gqn7GHCt>1d{gClg z4FM{wtsiCOoGUs!KL->17q+A^0*_H%`^yCQav4V6p*4-XJ+eMD?p-QSJIpR=0cwXiGSk?+3UGt^FEu+28W)(R6dQ03iR;p%r8}CQA z6%!aNP>}XCJ)<@8~VmN$v=w z?ezz(KDNLxV!jv^jC5+%>9+pbLy^NX%XckK*^S?S?_Jw{_=(+O2u%X`B?c8YOzfvJ zE)RcH1x;|F_;m6n|J=A^Eoh^+5hyiQ*}?2ShcHR9e@MWaMXnWRAd^L?+d)@__Pb7W@u2A2%MRMOm(E(y=cpDL_|jH3(VWXDoFaH=bo z`s1LSwx5MHFQ?zRDj(CeTu-vloT;pb#Xv@QZvvSeo#6fPW9es`Rkt|eYytNh*8&wp z;{zi4pQXxvNGwjanZ?ue<|;sTlG!cl&opYsA5`Ae+t)SF||B-nO~|P8k#E$R9xd0c5#LwIutM&LJ%Ak20cats@cvXbJ?`e&J=VR7m?mzZ;GVN<& z63#~1@XIULX()rYh>Bht+%+735vI^@jI-XyWe$lx{llz+4l5memI= z%<6BukN4d=<+=jf`1h}RTpn}xa17y)q`5=xQh-f{K(3Dw!!WCCw2F@@zJ_#G82Jj3 zozf|FI@&nTp#7;(On!3=OT9taZ&=;A7(e*5%1N4fqXQ5DUEBX43pZr zne-VBX*Gteb*Pm^-@k%4Qf$F0w+6XZXXMxEmV+0J`IF3*TIs3jhV1Z4nbxMS`tJNA zn(EZ5S-q-2Edjbf#dwmEHL@|Thpqf(_9ktx1_HOMB`IRpXMD2T6Gh4J%6J6X{)83q zmAw44`NlJYb`06~fEVVYX-ckJf4#D|$@U@f-fGHwGm(ww~Cy}A*|dj8<8}i9*5K^kHRmjWQXzwv|oz1BU7_a74Sf0 ztfEB(QFo^)A_iyuNYW&l?2edI{n$gPb^am9rx)cIInI7YF);Hm0=?zAj`&hN(0V~_X)ETvv&>y1pJz?9vMV@w6eFW4?ANL5Y>?!wV=>;RS}voSroRo=ALfwAoWZmH==K^K;6 z_t}hftsUk_Ecwnz$&UDxVip%zjNWNTWy)DZiEXO(jyW^1db>dC9Gif<2Ca_DvV{Z3 zCHd<{jtwBngG4CvnE%T04_PDFfx5v3Agm+87Uha=Xdntc{VqP5d+JK8p{UK?lG43lRpxK_)j9v~(kRi;`#v?m_c z1)hoQsFl2D54zD<9|KPNra5bi*}jb)q5P7(>pFLyM5XIR*5753KFOv7e|zQwf6b>` z1dD49b}na~FOH9(d&pcPdKA13q6y*1mI}GclBsBrbtJ(;i#m@?c8O3fO@3@8jbPaN zBU$Qk;0dH7v!Jhiv~m2%<+D2e>h;L}L^McT$9Ip6d(-CBYt=(Lp&_n?u!D^ zT#dMSbK3ZFt3bv6$>pmM->wYm_gh&yXyzS5t^?V^HXp)pll>;cO?T~2i0})P@7&DZ zKH##1G!%Zr!YK1pY}tAK zI_*R6C$BZdUH4(&1WL@FP&zPK(V`=%&L78=b<9u8RP+1>-hMaYL~a;*mjO@{CNBTI zBdU5@y%$h>`I-##iRMtvU(VW_(kxz>2)zc^I+wkEL z7f|tO`1Eji$&z8&yvaIWMS!mrbnU~YKmrYy&RkPD-zhc?g7MDQ)LH@pEkXK1*C z<&HeXsVzCAiU?X{zhSR_E;gdI4P|F7p{3D-gon}1sGshBX5?r6xRsqSs&_sJOBfNh zEjZr-LSaWu%(?4jrgmsaUbA0T^bds4e=8;}Wl~|!>XuEiHhyn3O}=5lc1$hLd_!bR 
zM_D~I8)3!?LmQ34*7D4+bNH}JUJ&}H%f@MfrgOBU+DTX5xkwJ2qLHw$~-I9WZa)c5L2Osc)E< z(PrbDtK@IciY!~f=t!cWnaV!>-*@Ha*>hTscd?1D|K2GY(b#6b8NDgqCOG~Aplo|M z%{Om13-Q?3+nYYn*6?gqc9~?FNBT7|RIi;+9(sGhC7~w}y-z(E(VfUZH)qy9mq07; zU+mmj)poaCuRm?16&Se*ygz-wwUt>-@5Xzz`JPIEMNaCQMp<-+(G}53|6kU3x7X#; zUkmcy;Jn?A)2mF*MbTTI$p8JF`i7>@F*n*O!_|{)RexHAbXo^KPhtzaS9IEO8m6Gd4 zt+pR!N7ISTNi288c^kvqc&tCSD7aYmTa;dqAJW$D4EkuLs<$#Z);r%IoR=Q3CZ*gw z%8kd~Ppb$*lBCzoM7%0B`gDk+|Byp_S^1>c)R#)_%4^|n(=wuEWd>zE40hVuv)Yn0 z?4_f)xBzMCSGey`q(A-tfh1^}T@893Ba1lBy%f2(-fo^a5C@jKQ!We$;oh&#vDe(d z!xWx~C&ZOQaBeuDi7c0S;@Rg7F{kFc{!*X~#@mvd5*=`g<_gYfbgO@&5>^mjVD9tB z3&T2PBr(EiIqh|f^lcu>GO(&YaFQq!_x_Sr)1uEbC^(S)q0RGY1N$*BJTz_$`ASn~ zfxI{@BXj_taxDCE&{^to`0`w)oXtblckw;wLSiB1Z1#2Sz9VDj9{K>6@`FX$(KD8Q}h*X4J@1;6% z>1wIk4P9{5F2_k_Poio~uKvQ=CS9YAqh{!%#pxYE^U9tDxr{%0dg*KG_(zLRVHMm| z;LRbSvEEJ{R}GsE4` z#!Pa*ydSl3AO1VSR#> zU3k+z+j^4%6h_1><73k%0=0hY{|V$(T@*pwTuc!utqi_ru9~W1&9`7ioMekkS2e6G zKBOA%9qZqR3Gd7*E<@vCvaax30HGvUaKK=fpGmV|ff^|_-95a1L?`5vUO6%bVY(SK zlI`9teWzA%x@G_z+`8JY5M@YSiufw7=keb8q@TFM&6Wht z@$*ZbKiwz?eY8x>b^8&2p{JSAbR7Yf!`V19qu!z4cKJu2@voO9)63@3DkR2AdF+on zLz0Xp*?XCPpSGiszgCIjB+a(8n3;XjFBgrpSCbDCIIJ*Kxe&v#-dH-zc|vXis_fyJ zHG>lc3Q-OOF3#!u87GT8pvEFMbd-2!UVK!K5+A0PJFoB>Qsr^C-Tt0tlH)P^!b&=P zoVH_JTi$j&bv%Hyf15=9h+V{va3G)TLXXUL8zdvHnL1vJ{FhRDmHIX+f|YZiux2~F z+V;)JppoUu(x#Cq(iYd>ksNOu&oDwu`>`%sVdC%n zCR?(6WKhHZd6<6!K1j6`Rudds%EvKiy$R^~j#-&abrfZuYiwe)tK+dof zbkcH{T&ctRvn@l=sKJb=cZ`d>J22tN>9(nqv=vbY827A))@v_BGxq3Qm z*;~S8?r#t)k45#QlKh)*0?kF5&rEI4O2s3pEs@r-e)X{2tS4{$e;qtMbWNfOCWezx|N`wPM^+lk~KNS{xA04 zGANEXT=yg)Ap{K&+=2&pw*+^04eo=x1$TE3Ho)M{4DRkQ$RNSpU9+6JRl8^FeBE>R z*53UvRsCVAy8GW<)$jj#pXc|Yx=bW}TPwX$%n(A`{M$+AJ-y`Cc0c#uXdG_tAT7cvkAGAx^BHd7r{{UW+s8&lgbr)W0Gb}t}UzhL(kO*!^kcl?W zbZVur%Ri>I7r3YMD&}rE0@j7AQ46lFlE!|(?J-wg9JK+Po7GN>Aq(mRICa+I<_nli zOk}(cLQDRi$c8a0+tPzS*ME}Dv~VBSR3J*LCP*uDot<=NFvE2#|C>udm?nHP`KN(K zW6#&SXDu*bc9Pqist972HQ1_Vu0{`iSI`8J37}aDlbPp$;-6z8Ly2f@(ollSf^2|Q zcwr6gFYdw)h-EsErYT7q&(bMA3--SSHrHO&B4C%GRi80;MqmAwcb 
zvnnC;yh|#)5wdGe zG#}Cwcns31!*zC@H)g?cdC#ve&D3qzlcG)GpD~@5V(^`ZG*8>B4QvHMc z5fz5)&9s5f2^@>ycEgd8KiG zlp6>Co$lweuC6SUC8Tz+7Mkyl{Y=fF-=iivq{m+Fys<5}Du}u}1yW-b%Phcedc1`+hFhf ze2W;KFY@2dn)rXi?D$`tlk+C}hqHi?hi#GZp_boO!A%4xW1g+YEU1Z|`M-g3ITf}s zW<5#tz=jBTt90@$(Y3Vn^oZ2;bz+BuWUqHJKWrRV;W751_>I=iOx?7!b%T=7Sl!)T zCp|os{Ky$Ww-(FPE4dBY09-!%nA{*8f1_*>i{Eak`jj&gL3+V#raMdx}2FJM%_SHIarH70uW(nspa zA6u1ASX0ULsF!(%Xfck}3Og(<{iz#PFWP zE*Aa|8F+W{AxLRsck9#bKS)z8p5Da1!Z!edjfRhWqvuP46QZxXZw zloZli%vpNH3y39l9L>kA!#cK3jc@PaHVh^>FuNp9B0_ zwm0u7FtLv^jJmwzMj|EEmkW_1nRs;rL2`gH1rqc56aBGLb6}OG=>oRxk%X;6TmD41 zT)_B(i!rKFL8F70`T+gKQOrBSpf{jZ4=U1ic#hOtW#1KWEu!sYxJyfillVq1ifD-7 zozK3AJX(8vxl4#Reav&q80AB^ExS^Pi)!}W?{F?wrCnLiNXIhi>zJK9XbxJj77Uwt zU{$HPmT4gvZ6f}uaa0YH{K$yr+`(--|M0#o)UeS@A`3pAf5DfYNKt#M1KfBMALu!{ z(idAx+q7<+QE_$DYL6rg);TUe?YF+YNWiZO^v@UYW;08?t?dliZ5C(J+gs2Ot`hfR zEDkM^H9oUif;A4qMA%ls%%oa-!7vShEMpO?3>J;F^j4Om136#hR(Fy{9b-vg_CGj9 zuP}44o||3>*8vhEy9OjPKXpSGQ-^P?(GRj5A0-;UlHaJwUs2Pf`^787<)yBl{p9Ac zMWuC-!uPBsj6kfUlV&uwx9X(OTLYbt#IJj@DlWWv z$&2)R$h=+Qz4$Ba7qb&HV8>Phy-fwhkU?uq&C?&A*o?d94n%hng5Ih%{6L; zdMlnTqi)&HiXH?3KtLe3kV(;VW7Pc%tZHFmDa~{g6s7NYlwQMFEe)CLi%O|msZ_N& zp-2FVR_)KbhZ~689EkrpW`@SnI}Ltn*c%CBA0!Qnt2gl=VZsW*bN^X$&4Y_I5$^C(hv_Y zodE7QjLi8k?OT(QAZM$469KYWnF}Fmm=l}!?yDFnwJ&vcI!=_=^o#a6i`yp`h`$8!Pf`++2PG-=T z$+ZrH1POncOmS%trbxxp$+?LYi-S6#5p zxpgqEDfOS?N8GwO>SP{o$slZ`u%_D3!C^8khKM}vi{Enc=JO zt{>ADh`yjXpQ&@k`(Ak2!C3%p(|dZK~*~^sf%+GF4K{Q&;0FxwcT-PQEs(yGrW7jGI9c`>rjkkVA_a-U-fu zH(umG%YgHL*qK?QO}4W4c;7TV)~BoN*JGofMC&svLFLQ4GjtzMVwMU*yv9*{M^kP9 z=HmsmctPYlI^RD$s#6!y6hX-+QhC7|d>@X!YmKLUqIwwb=OaF7YscMpL-MxB7L;lq zHhSxqmtVbRh%+7=Ot$ zZSQf*sMg7T%}%WL7Uq+6Rf4hPK?KD?*}5=lbH0V1_IFKLZIyNM%xaC$CuA{PKXOim z&iCR-{a9A_xrggcvGdaE3R2co$ZS=`fanR95wD~-cwCHi*{OAXa4j<#0l}Zc) z1~^arzpm)V*@Mc%x<_&dP(WH>UFsikx=u!_E?MW!`sxMNwl`&2(W?cZ)XakA1e2O5 zvWNEYm(1G%My6q4`pXmRYWxToM;DVwCNtsO@#*&a>#lIwm=lAyF{5imvF8FdVS$j| z@p}eKqnHr1@9Zk!3YJof6oi?2!9D%v-0$g?U6SR>aJS>BMIN3_k?3!lQa&)uH<#pxJRczi!>Kd6EMKacE 
z;-F-M8S*VPnVJkSefHpq=Wk--5&w<*Di$12mu#bPuR>j)o39)6OR>u}j>Q+ef^=7ZQOjS7{gWF=Uf z3e>y7tqBH*T#-?Mlma5(mV#ESx7ChFQzeprz94meVq9>uSzcXT9@&6jR-98^U2Ud( zNd{D=%vPR$3LEQMIVFM|t7y9+UFyDjB6bq^w9z1Bdqga-L+vcFobX;PgO%W`pJ_Pc zM=IwDA}9Vro*!3ipZ7!C)feuoww^9?ZI4+`&Vz{{JxkMYx&BPEWnWNAgNc_6Dd!Re z3*DN61b*Q8lA(h!GB_)JTq=TELL3G-a*Up@;B^!R(e5!tof6{4FS2*K7F6{N96K_A6_?Yh?ywaDXU1>5(KNR}#Bj)`UtwPE zD8;Vez>?*dTQbG4C~``9-nat6cYPaeb7?M~<0G+K`dq;R$Cd80dXkv{+Rq6D6UXB5 zqi8&9Go0q_?~4u5wXwf#*LH;s;3LUrPMSd$Nn^0vIA%i`o5=?&HOL8%s@N$Te-E?O z6WzD7*_@sF zvni)*FP?leG7!Wc12+c&s{6^E)S(0gqI0uLV#Nz(#YB!6w8a?oWy<^#Og;vpPk)nIZ_E7j1Ovo1<;ukTkYIZjswstmWi0G-_TW%Q{a(``9OifzzjQWEy! z^xy%(S4o}3>o$Q7K89*T+(ibQ)|g0rEwSl20#?Z*J0;!=c$O@DlSWO+h>GYMj3zAv z?iTRJQwHq^QR30HxN$K zcM!7G$B~S(X5tXtX`-sqOH#Xug6;@)j;V^-MWEpz6=EY5GIIw=L{R^;ipq}KQ28dU zBqqqxFLe!=>m6CS1I*7N^i5eKonrhb9DWH@Qy)Mq!zj4*9jmTcBDB&+;=IujAC zq5vNcLABu)*2Wj#7puhht0q%;s8s0Y)^flRUJ3o`s zN3Ts0pDCY?cDLPm!bZ0|GeLNd%c@jl#%*bWJKpku08*-B3pa1kkbIP>j{=;K9idJQ zS~a`%(Dh_cPg|Nc;WoN}&aNgz{0EJ4ee6{jr!VXas}JY`5j;9NZFOe5&(`E7rjP-? 
z%L%G2>E9KtQXJF29cF#AcD^Y49VIgPt-HKDN0hWY($rq=LqY8TL#ieNy64Rhh6Avx zjTGKIVSCdWUo+aCaO&6n_^4Hnm*P6M0z_!kYzc0e?ol1+mj5%Vv<$7*o;=ot&^2md zbJtFiS9KaErX(be{u_`Z*MKx)svEGCMNL342OC3TsB5jlIg&Ij+$R#azifSG#jzl!6T4VF2d_g_?5S84ucS-UfDgq|3ZR@)ffS%awu1?g1GD-3u1C`th9+E#jP8 z?U8qz|0;zL9J$k~UAac>$qB6Th$*-*=c4T9@Fxq7{*3`CfLt~_JwmgZarfBd{C{F_ z+#mzF%-W6(d?VBBYRocbv!krcJIx-?l?n>3uO9E21=lPwoMQ<9J=a@j`^5ob;zA%-HLV{~_=W8M<0Lh|@N3J;s)!`Rj~PUmuoM5S`DJmK3YZ11KvudIn6T()rh3Y5#fI+mKqv$X|6rA=Zz$Bkig9Z+jg_mu&^8VQQnXSp zs6*x$RDVJdeAYpUa=sp7U^}s|+qEkm#F|rve{5c;vCS7o>2Wf+VE# z=Vz7EyWV*ll3V`B{b_BVO!mTHR`iOu$zR*Ouip<23p<+C=}(BO|D?E(wY%`1(0*o4gW)&g>P zIzkz_4f@qQM!cVbYZ|J5{wLYwv_O1@*||6MS)?c{U39E1q9<-+ooEk{25yJKu`kLD7wR%G{l zl=`IKzskw0{_g$dRhm3mP}>MzAY!k2wx9K<+T-=-$McJIU8qf`NG5J&`!7J2wmAOK8({sb}6I)Iu!5L?tH%~^z^<&D+fpobmUB&A6wuUq=5N6m^)YM+=;ty4Q zVPMARVlOKECUZ-bd8>Gj2+yRYbR-my0Nsig((|JV{az(^cSFH700-@hV~dk7<4#R( z*j}PdyM4jv{;V;pNY=FF;Jc;rKB8~|^*^LyAtZVX>2v)ap~1iEaIA9mUDdXF{VD&p zJ<9v9|AQkj6wdJ5PgC+G$kA(q-lf&Z^0f5-+83dy`7A#F;vDw(>swd*dH>@l>FNCs ze_Q{-u@T+RP}pZ5L!Ceuo5VsP`)lUp%&rYCp^xpM(3h|f>@ueoS0?+BU`=^hz>7MC zMnU9ZK@H|ONtRZoVZpWT6g_Q}*mQ45Xqo{34@$n~l>6_gp2{ZK6cb7I&*6wu*IBN* z!LxL>(@nu!Gk5kW=oycH7C7Jo6ZaGe5ap`?5@6Qj?c`xJLE8Ph)bmLxXYZ5bLkk! 
z2sz)(dcB{G6XZ*AzEOmDxbBrC93DD9c1oOl0Ml?DW2eMmRF0ZxBWh{Hr*V0_SWP}=LgQDbiDY}OI9p~O^GG%t5U7WyjF|(1-p4>kwDye zD&?$QU%-XMX6>=)BdP*f;D88r)!b1IP7rRM3sOZ8P`6S`XvY?`Q83%7GG+U;&uo;c z5hRN`L5ofI(c7vu2&f4U73Y(Ys2dhWWI*`FN*Ud*W1=laWjR2pkW|ia_*`P(u8}!# zKdW*+cvj`P<8Iwac1B`oV)wt>N93!~T&p|oM$bUjiLZOQ<2lgc_&cv(u^-6!!c4}B zc-T9JKx9!%OnUY@e6+49413SihPZ@e)WcrBsgIX`?G*=~#zU;VczgPaU_NIhf`2;O zk23W-e$+{8-Ho)R=`^#G7@B8c*Dye223?)yx9Vs$-_UfTdk(Nokm!A4{>DU;my10Q zu4U0T_xpHRFM7rqQQ)O5_$QvW^wdv+Rnlms-HV9o;Va)E}2I^I>rd>O&I(k0g?Sz95Xet=&oEC9fuW~ zAn^&u?`36)Bz{FAB5K}EVvCHfPjna6Y?0Pxr-)l$TT`Khkqc9=!4`oW#sxV9Z=JDa; zSbb2XmNMsXb6H);o|SiQf*UQU;`TRwAj}|yHqHnt?vePKEIMovES$b{W~iecQ})?W zN6?lQLOrl9d5X9>Qu*BQ#;j_ar|%~q(8YEBJ;qZ#MKC&@Zp1B#_M-Uv^r0=&2m7C8 zE*Kj_QYMEc-SlildTmBmq%qM$<{ZuVzEIXHms#_FaGRRQoyf5sABz4bvr_+K^aYxt zAHyu9yzL*{gsXx#3j8`($I zjT?p7Ke#G)4<{mf0RDAWbHxkoK7rYgZhK^p*Yx!7CCeK>$0;u{ha2AFXBD(X2Y2rB zo@aT13${$bM051<=xsV;OugGRCg^HXKHxye-2F(k<~&z;pWr?+&33P`BS#h49NQs6 zy}5VTdM;?gCFRc&^$&r)Wm}f5P1EROqGvXvBKxLT8Ir5$`g+%dH-K^8@3XnuwQnhh zzrFvkveOeamq4rI7Th+~at4kmso*o6y5S2Pn5X6~z0U$%v(_ovV(DI=hQZuc{)U$+mNJukHqo?n>5IVOxi~K z6mXi^moIG=vM%dCo9buq6;}+R_0TTISluzvK0~&Mhjwc`zn29St8?O7+9RS~@jV+? z`v*tsG}-rko*Kx!7HFv!NA~cSljqlR^Jbhu9lJHg41jUuMN=`5(r(J|TSDsQ&~MCB zOwzo})QiPGgB<_h4slr5cYXfiC`xV$IxS2=IFItrmt!9Pkn|hD^3cY6ge6SXocpT^ ztn^+Bke!Y>2uKQrC8J6r;{L<alaFb+A{7N#>OO^rdmWp;SptykQR zEr+C5wvMy`IhV)Vh=6PO-~)rIHW{6KsMeiPBX&#AW_#fbX?xK^X!`4XkAGTV)rp^X zyh*p~W`_Py)^*Rm&$9RK=#*^^a?7#Jm>$~`ZjwQMz^Oj=8!o6SWsQ!aVrF2$)2hQ= zWWOVS`$|xMKO=A`G$^V>Ztn-e^j#EA=VbY&af4Dy6yUrw#a6(%*$byoyK=fEh=_FT zgcZH=E>Gx^!OOf9zDUxo+kn&}Vs~xU`)bduf)y$KM`hjFl+DA5sdiFi$EkSaESv7B zdRFiA0!WAT*R?OK^fCJT=4pDF)5M%Y+7m`uYsU`TsP09Fq1dj^RZ)vzVCw*yVGG=0 z>W^j7AWG$yU_vk37^Aq}!MFwRq|BB;&p)^|Fgos`&nawD{!+$X(dUF5Tsa`uT(az$ zYyhGtOxd9!XOpVHBjcU+THsydr7@rtqZauF?p-QNmKZX<23@m#6*84o^l9fz@tNUl? 
z%*pMiV+F|r5j$l@O{6yOnD4ke&AlM>!!8K3Fn-p@FUXw2t?iOoCROi~?+>*hW0pp` zSdRB`S@$K`5;*AnmSo`jBY0Q;&L7o>*BBw@YIMD{0a1JyVY(kt25bUCy1q*(bzjQ+ zqG*oVvx&Ls0~nL~nMrS%>Oe=^)h>Hq7fg!58Sxmr22{$U|?V?vq7%^ z&&O!|-&L354f+rK@(1F)@h7oOp{(l{>7)lCovR_%5J$q6mm-poKIFNf16o8eDf^Y?y1hi5vZq_Xm8NhjQ}erBJk$?gQH9{I=V~Gen&WoA#WUD#OVl`G zy@YexKJHK~lkl0&?zY@BLb=p#IU|Lb?krss-z}^VtdBSs$=_!-a%A*K0JOuvY55;n z2yqR;$|d_(|0`0}BfT1Zr3Fz?Ec&CA{!U<>wX+<4UFbXfq?iv3h(IF?D)yYaPz{P0 zyBNI**a2|d9^=!kQLDW0*lq|k_;=&Au!!Ve;^mlMu`))nR<;<*tyqxv_}5~8PRj5? zkG_~{%F3CRZgGk@j0}WO)j8~NIs!c`K8#op`D&6n{|PUL2^DTqg1+*Hd-t-1+{hYr z_)pxqH9G^eTl;;>*+YNIeT+P{{eAoCQDk7_0iiD@mYgR;QQkAgX+S^nkYs5)-cu8) z@kS^>C&m6mU6bMW;3b`;Y0FP8A$SNEUcKrJmIounh`Fq&&KiWOE69L#FDHNtDKwGYXS`yVmk4sqp_f^rij39-29Q) z)l5(yPCz|gPUDd)2)@s+GL4Rv(#J3YXZ*5S$x&h2re@kw!vwHmL;h<~3VPCZR_v^D zX(N`m%z&)!MJiLOrr)dvl$m8MTn43c8aPR~TBv9{`*JRY?6ki4_RukRqO16e%L=CL zGv)Pn%>_At=2Z@$Cuq8Mj2b7kk&~pkcB6Dx06`=Z<#*X|vLbytrkq^0{A_Ch2-CJ0 z48J#P@jP6zn_aGbo62^yb7TKDeA9muW5OU>1L!MRB^qJ#j&eEn{o!r5p}0&k{3sGJ zSU-M*K$MaXq9vE5!adS;kO?8DcV|v%{Es*{*quNE}OZwYH*T2aC_-YtUaHQ9n(zgfbZWo5x{ex?;!IK#s zv}qcsV4K`pGh&-ac*qr?EUGL_I^7i zRTAfe-ch1-&?4_J5k!9AcF1xa2!P6WI1~Qm*z@1dG143ra8XQj-N-Ez_VK=}M%j05 z{A$$Clo}g1b`*K6@rXs^NiR0X!;NCwyGj?;B9;x0^@6j@Jl&pwp~E=6eW}V`+tkE86NNK zehmG(XZ)_?C>uv%};;IsX?bDy_ZGuHy*EFV)zw=1e`0j-qa`y?>B~CV`P=l zMF*8CL>obU<4Q(?HhIE#&>SX%*>WRs`VqM1c_q2x!{7*Iw7JlYVb*9|6G&}baJxJu zSgkk%;vbw@ygYlC|AV`?@%jh1>eTrp{OSOr+V49R-%1qJ%p1f*d;SMUwBse)2oRzy zF*eFqA33&R#hcMi`r0%%wF69zL9?}z8@Ba}H!y>8Kkz>JUsOW(uq?Kh87@u5A$AHg zuv@RWC+FR+7@R$as^wmGbM?==E0s;;yn;ZG#x_0LNbw^c2*ehaaYiu{o>4aVl&Q+2 zFfX-SFh59~JL6Fmy0RDle3!TW_-=Cjd_efY$m{hVT&%GFS%b;jrA@y5{0Hw$?zUDh z{{yqyd5yK+N{YVf*kSUvMDB0vBmC;We(@FseAv+hQ@d{aevDKSDn2lHi;GvXEV` zaG5%gv&Zs1lgl*#DO3G9LOPfBb@6a@o5 z!4iH6c1yl(EzS7%|KLWNskOlJOQjnAhq&b{G^bT|*iiX=5P4sftCLLvff~N6=I5rB zgq1>`JtUe%}sbvx4(EM zr17b6_}75O)70wGJeJQg>eYz)(|l;4NVnDZIb@2v-9@t{Wri8QCSF7(GeL|BMVxPK zJeS1ba$3U=S?|h78m%a87Ks%aF-KW)G@5%n)tUpunU1@DoJgF!pe`}<@n$IeB=Y$4 zJ&Xn7=|6%frF34Ui^?VmkPg$M 
zNE70^5UNVvqWa0YU9l||9=9Bu|C2*>SRj``*TAj@H4=k{OWZPJz4X*4djkGo3P1L( zb?JvIA*WiU=<_GWg19}L=RGLA9`f2oKpYxltG7q%B!>V~Axi+S617wsUZdEF7ib*F zJ+);c_37LGR*tsV$m!p*Xz#0wx9s6vHwQ2K*famW%t!Bun1TK5G5x*h+4fK-*9Lyc z-5%rhb?IT6A2kc~{Wg$#E=2rTcD6o-v7wfbdVk4xFf6UHFs@0~2kt2~bg_st--2mv zR&ElXyy1$34Y$|zB}B{?UN=;u@fsnoc%&CU;D=`|OrShy7EV13j{Jxs%Y2*|h%9z6 zL#=7h0e*T7#lRvVl`}aGd3Pwpkg_gF?qB7q`$i(Hd`sO-k@9XC-@7{;Wk)VxBo@&S z9R>roJr4*BF16|s%qxW8HYF2F9!vJ{43#vUzv+;T%atk@{t-}sK_|Mr-8~b7G6#^DZ=#L}a z%lbv0&Y+nPTpZ>w>HVD6Dow?UOo~kj(dO~0t6yv0jkAt343umDWn_Z|8l-rv8qg?p z4+21!$-2Gz90G(Lb3D@ zsaZSmyl67gwwRs0&eEUo4!L8Ga*b+VVJB&s!CWYaTzwB37uXI0?xnS+*yMx&)_CV* zS9}%r4wDx7)TX6l*Lmy{CRU)(Q|(UhYHZ=>jf3p1-iv}S?wOt+V&S{N7KEw(x?&yP zszHW{j-}qQVQixOd{4{B1jiY%sK}p(34b8)4m-L3CSmxV)wVmfXFKsdig@RL>7wC@ z=4@L~lk^3)=70CApyx9GqQo)Q49V7wsxM>oz|pKPc~pgbIPyiwL~$^Iczt5@SZLy@ z>59pw9?;yRyRKiGy~S@Rhb?f%y-)U0a@k1qYm`=gEypVGE=Axs{sr0K{9vvZqeE}H+rNs^dOp8kV_Ha&7bci!p<`w71lJ-1#D zIPGZ}I#Nl`V8<4D$wKIhik+R>5nv#S*jOO3_#~2>N<06~m~9K4SI@doV7c6GZU$VI z_PODN5z{J3V@Id#7_@`oP!4mXllP`?XN&4Zl6OOMCJnz*{n9o$4pKK(jYG`V`Dk6r zyLU@Wnal%akN>@IT}Co&Lhy;L>Enl#Kn8shYoQE)i`H^*4#;sy6O$yhJYk=FBcyfW zVn0G7{|nptEGo}iY%BKoUgvP^vUb^t{RuD^eu11GMj%l&7;f68kwHaARC}-SVE%Pq zo%X9tRjcjhNbcA}i zwWDuI8FN*cU0GGD`5p#8_Ri?{5{7Ec!i0$hY3z}`q$gNuS0oE#@hS|Hlr@zG<-3ra z3I)wz9N&l1nXS6pOpgvx5iHHxIRXN$-%-v^g!MzUuqv)mvT;f=D0)}S#FS5ekA$)4 ztW?kK;@K|RsNFt2%lTSf>ciRf<7-5WvhPc~z%(nWJUx5W;=z1tFi&UQXaG|_-4`xr zvUK;~bE~~b)hJDNH*p*;oH5PEO(k!m+?l=y{a7Ua(6#AcJo$L6X}j}0ktJQMNlzpL z0aUzQiy7wjjm!=XoGeE)8YghMQK$Y(#nrLQI!Y~_YSB)#wT2Pb)?_DR>9U>igtLrz zWEEGY5|-zCEGay=#fQG$uT=uHaX><$Mq^a2kC6}q6#ua?)t^XR47~usa%b1xNk9KD z0A-oah(0(JTA%;whcbeQp5!+#TzG@xO@^Y7+(-U1 z{K~F`w88Eep1#N^Rc(mPtT(o+? 
z6vrQK1|FL`Tw#2#dawGm(a(6l91o>oF=d z0kM!6fo2EdT3`y+O*S2Ef``Cel z@p#&u6;@LMZ*`@&-OlM24D9jfdVR6Iz&o3kSyJ$Vn5~Z8TXgW2cCQ;B6EuvA6QrwB zPz9E!(M!m-Q0A6U;Ep<;##>`#;^L{|n49)L)?LKK$0VMhW+5;)Ar#^j_T*1LpXroy zaZk;Ym3T~wTc;TQNn@t21e>oXP6{qmseePtrP?U!py-+GNK^TynTijV2kgUOyuU0#ZQ{(~#g z8srh?cB{?IO0a|dT<#@U+=#w@&41iE`pV^(im&;oz{Im}KNNvX5Em6zN!UnaVuxbZ z{*7bK7k_o@pz7u5@;WlIt;O*l9Bdpn4fY-QY0`wCGN5^mjYwX@)#+STdk)k(E?ubV zVCqm<^KP1w8$~x3dP5??apIPUQ1{vo#CDhI;;HHnDSBR& zz?4(hhIV#G`2!W-NPmY7XNq5+f~fzyn$`bVf~k%D2N&J-4^EEpb)Rk+)LM4_T!!Uq zARLUCf7L#arq3*Z57!k&C;@#KqDVg3ylgBSromL>LF;Y8CY9RN8(O(0hpAMg8&$!vwvO8OCL zB2@U$Wz4_qVI7s7rLFx*?FwJPQa=0-W=H1EyFA~F^n=r;?m>DFa^`@$!r!~ZmPrA1 z9Qwh`oZk@p!jKyN?%!id01|;#=@-qellK}r==q*s!)blJEeMvnzalfL$E!L*qwzV7!pOtuwM)8N$eYP#xqxwb~FHpX)Bv@myb!ukmWR6 z-ICh)5}WNdFE#7J^sixdZUfH9$@-&7cVY(eG~G@v0Wss@8{ZQO<&uS^8BX{(q^qQ@ ziOMiI*O}`HbZdZ|_;-Z!B`Xm$5>4yo!~?&*roZ|bR$M}!{~AwTcldqD$u-$3H;Gy) z53p6?c!J=E)PI>Z_MhMW;R>VbbK~`c zH#YAwq>_KOU+C{}0r~I$!aw+bA|b%~e|!BaOggt%glCccAxL%BRfj&pbYkn8aT`d= zi!}4O2Mc7-;f`?EeZAL8>Js=E=^N@zV~os~UTwvcs4ngy4eC+9Z>u_Pv%t6^Yc5Az zyDfH_%d_m(Gtz3Se{iLKdv8Oak7p&H*4H}9&q&>#uB%(#sMVcEep@&G+($yyuO){< z7H-Qc@K#-Bx*Dqodc4?SNojA| z3X~xW|J=t_AwXLiRy6(Pc}K{*!`IOV-pjaog)`?)+AE_hjT} z1AJl0o^aK9XumGDq>6SmJnUZL|Mjqe_z%u@Qd-rogLSQpc0<=B3tDzXtJWqrBg#8h zX_~yW^@b!k*LCT58XS8j7o3Jpvj9~LU z==o$v1DM_W6+WZ25{O=jEd}Azmwgu`^-9q2!6R7PB$y&LmiqilQqs>b(1gV|2=b*Mf>Xx*8ClGej^k)!#w=xDCo=Mam`{^fu!^T@vws`IKAmD;H z{#t1%&AB!=;{!aJTw3Z;uVE;bzS+nu{9FGmtZ{8m?B@LXT;W5{*(}e3+p~woVO~&X z0y%diS)x0a*VlKAVU;0FNs%s!N^>?}@(>U@Y0{ubL6N$S6e(3eH)uA;9&O7&VKEeKwwf84kz`Z(q8Z?VO!eAomV&y9 z7W_-#L35}!yen}`L#iErcgbyHp@oL1Y&?#wdXhe6H$Tx~+g)g=?Og#g$b)sAEZZM{Eg`s7gFtpmQ0^R+6IQC z2=)Dv%;8>4jC6rSzh>pOj3sj1 zqZ)?JICLZq?R})Oz98mEsmNV5?pS(9?S}qdc}m1I5X0!(K+&;-nEV*=&=2b#(Zd*X zUK)uwAK=?G6=NqO@f2YzOnkdl8StRkV5eV1A~n|Nal}142yz7}_A6WUhX01p{E6z2 zXlf&ZpP$+Y3xPUiKU0cYujsBAla`x-l>keHW+Z;wXzFiFOg+oDzUc8t1L}cTbDx5W zuHb3u=}pm0>6QvBAyX;BVJ8%ARZe?MO3C2urK~=q1#WAh%kAR{i0u=2?dzPEe5{#q 
zwuVE6^~M7@9HD05gzvR_sRDBe1zCQ{d3tK<3E5^Ky~D)Mk|k9%#VNjz<<+{fWxTj5deg|7OPTx_^<- zFv?5cj;H}sk5f8>kRHxgYk`*oPd9k;;cQRj6WLG3oCVsGj?mGotwVk~x_>u;|C{FH zKLv#SSKHP9@FQz!vXDTP-0AD6!<;am+zFk3I}FQYbXEgvtj zuSD~AOFolI@FlRoNE!Hwidc7oM}?q1wngQ%{Azr2FDnPWQ+sX=t_q&r#_vfSoR_}~ zF0d_J7>3FslPuj1K0=Ipl;MXuHxC@SkBqOtbL})UY~!A>7Mb7pfAL4NrgHFBMd;)0 z+BJCOTjKXT-1Lc74*9$Kwt#vwSo}y=lxXbCjyS*##i{nQ%T8z*B(} z$Ly9VOdr51_e-xSBvjl~hvqE$dWLJL%UzRvsF_Z@|UXZZH#VOHb-GIhHX z05v(~FM9SS8Qz7C#~}S&opy-sH@uDx`$tdmIE#~PaYBeY`gg4T2%E6|afjz@%!S@$ z-+K05idlw#er?^!)SLXadt2}OR}^MFw?ZLT8a&Z5s@d^r%s7~5{Yg7|n$q2?>0Y0B z0(y2sh=Yc6%)b7)2OJ^kTxKEv&X$uU^3}-s$KdP%{sI0uvL~^TC!G zk@|$71M}dYasT<0@E_)k|DOSK>{k?tti-Z4x|J)6p3#7$zB}RHA?9NDn$QF#^9ng@ zS@XeStk+t zQwdT)X~2q8vddH+X=l!aEkkl&f!0JT>YLbDr9|%1<}axRn#eJAiCR+&2rhGg(Q7vB zxK%J&N5G%juf(5&UDBS_twcTl$w5->;`vi5cm7Sn7!U>HH0`5#Qlv>67Az3)N0$}5 z!(pDQaMHh90yeAtXAg9!e3m=pkUy0P6?+`^D*6q&JjEr0UB_xREalz3$i35or*pA> zB;zNRU~S!ad<+iQ6;6bWtERXRYNfOS-~E>Ls#0OTG>^(6(CyYF?9t|avEbip*`^^=B)o&@ z6CniegarOYDOv{W?++VBpjlKE9K03NXI~in(DDgW130Cc-AD93inPGNrGb-}zp_8d zQHZ9#`2NZ>d^{S@B8rEjt=3E9s3G4%N2g+~WasM4_Nx6zvxsb}hwJ0Wa$ilMKpzy>xllCuzS{&$YzF`p_82H5WAW7sbwt zcF|`l5PwCY5sAO@7eyzKYf18u_UE0($Sv({_r%j0bxwLy>$7pUDtC@6%Ehu)O0_&{ z?hEV0D{kKI%^24HJnu=W^*aIZww0w=v6r_>>ez|%(~Ekzd2~^Cv1h(38>0G|CFJ;| zmoO)C*VsGZ)|(xHzpcqjam|wVmZ)FNxy?GL-fXF& z%_~6PiGbFAg4QB}@&>vd4ZR?GW`&c@`gg%kqJ1&x^e4Wrq^AbWHGX1)3}IdiGe;S* zmxy;1ON732Z1JN`JzQFEN0O*1Xlj#_QkP^%xW(ga?z zSt413Y+|c(T1elzwo3y|;(AnA^c*?WLSC;%aez!>RGc{@JCWITrn` z-JKFD2jm8q0L?rFTX=%gt%oYDz<{14?83Wu@>n-0MwR~3!YAxi0i+q_vnHbpqhbbw zYqL~s6yD+V`1|23CsIL;^xwTWBX;>YWg;iHu5+i|Nl2=ICsp|_)0c{q;0auYul!>Q z-a#6YC9^|G_!jWGb`EKsbyQ1Sm+MfQN++@}Cq)bfu0GMy1*oySxlgq}zN1{Y(5VX2 z4Dd#wU8P{9b(y(iIY8@yNVN-~lt>f7>c)fmCaqoUbes~1pKT?jNobS5h-3eJfFNWP zJQB%J*49FzAzLZFQG*@0u7+0c{^YEMADGl?Iyy2Owwr`q;uSml#FL{WHw7)+evh2! 
z&1!>0evegw8H8f(l?9zzW;EILmkYGDIP82kP1b?uFN0bP#79evs)}xsI$mpNQk%73 zVqbKEk9j8Sx$z^Eb{FpYy?dLwzDa+wD=wk*-LN`z`w{IGap$J$HN$(k1=Zp_6mp#M zoC+9q=9+g&5dI$bbG!?9XB!IEp7S@;`|qt;@?w^lgj~=a0w{BM*_Ebc z_+$DD6&#ugnhWCj4l&oQay3TV@QZ}49HKSr-MjJ@VrN7Ly}?89a%URL(4`kI5EpUl zzo_v9nl0{&#}iFLlx+Gq$C#bJ=Mjcnipu^- z(JO4yE81l&CRRT>+5XIb4o7@?^fbf-kd}MEfB8;Ha<~s^=u4AH#3x@Q$tvadju+n$ zqnGoxW`b?l1a*Y1AmVg!`R5j>PF+$bd2yk6+AOdc`zD&cMguq z*o=ge2`iCMY-=c}$27}V+8?HkN{O=kDIam z*5%WPzJO*Z%hl;`aW9&wIp2b_7T~gXzEK<{SX20X&in2K)`K;4Zg>h!7NEa*dL#8l zfc3=Vd7+*8UO_6@#z_oYc-~7mCH#Zk)4Nt~FNLB7>&%vZthO8fMjKI${P+VW{#_v{ zIwqaF?lx|2E?uc6KFODg21b47!SMJwn*~zT9&a9V)K^t2T?Ou)7uo)r_cexXe`Iq@ zl56}8@|a$~e9t9FX^HO8thYl(7&urIhxe99xK7KLZ`DjPe=)ujU%P+t1;aW0f%ubM z>0}#|Z6P9-DvZ%{ z=@ty!8Z+j#+L}_h&Z*r$Duw0BT}_gPo(la6G8uQ!J&IpsEp-V)^K>aw$kar~s0n%Q z{Tom1M3Q@-k!gO(r56)bIvEbxSI3c&8=62z(FV7H5|A7gdifT&ir*|PfG+0;W@a^T zf-=B>L^9_tmdkX&o znR|L|zA*M02|1XErP|cCt{>jaBQvl0p1 zSkbZzkHd1mB9K>L7&z5qtsCm2vA}g^IyhCOr?ouS?7q0h((#xPJ?Y@aN-bckg=$rz z2nc~b{+u%P7%j&F>m(c#F@PFLUQEhtQPiptdx=-D;3Tb%T_sT)cfvF0{kd4sq0P0! zAZLgwQaJ>?+HCg`Mv!AYF6|b@PFC9&A`!61=q`CUCKJx1(%@;syR=8_er>{Ac*y}4 zl-;(ut}>i>pE34frW)k;Ds|fGH$rHMx~p#~e`L!S*r+n6eCYm4X_N7XQH*HY9At(q z5hUK`heJ{OnQ*`7o!$L;$xK^w=JZZ-We$jXJyRX?>kUckn?jSH^%nEiw?V~=dB5VS z?dy4-VQmSaaE>#6z5zW$W9K1L^N4`5L_$TrZU=Qd^$v4i-x3M(U3O>s0oZ&RQI7J9 z@Fk-xexdw^1_~C)hnsR4CVeaqo}pC~@buVDqQci}f%q(0+XEab)e`27x+x9x18Hw#*EiJT&bimT zTD+&q1!fC0zh0?-#m~3D4Em^OlTZ1_$5FEw6X)FGL)9yT=A&T=@H9&zR$Cr#T{WHw zBHTi{J_5M*hQi|0VP#6cjMZp;Cjt_f@^}T^R0SX`mVpi46ijOK$s~E}I35HEv;C8W zc#b~~SN7kFv%bst*#EYI13ZRfEXfgA@8QtUH&2yI#~~|cYUuW9Z;AS~>je~mmnz)vg?>O@_wHkL`Mh zGOEC+eoBt)a7CAML)p|zh<(CTnQwQ6^J)ex(@0LCBBIPAX>E!fy5Yz*L@$Pjs}3ue zas1734^^&Re)w#VP$4`Ws|R<)J`tXd-H2Klmh$A z`QaF@gFY*h z`iAKMk`(iqT|iNaSB_d}|GJPVZWzOo6k=vkA2XaqAV})D*I&MX^Yd)OfMhTz^gyaZ)v~Xas1Q6u{^sh1I~nv8@}`L zUj9;%d=V|>Cb5ZgCLE#)dT$7O6Z(bN1eY0yC4cqRrO&E>`EdB#ur&|o&T@RSw*`x? 
zdth&XStj-}aO?@AOB5$S3*{bn^^@n)X`83rmw(%bsyLq^-tH8Q;LqMJvA1p=S*3>H zRDigpjEaA|W%bE7k<|C{k?=cITx%fLoFktywQLYoN>}Q+$l8@5U;}!q4cfbiV1V4V zn$+H@XWz0x{-S8#z)dBkBro~|GG&nW!N6M^XY`&782Re3xuXe8-dw; zd8RwqHZXI&CXECQjov0dEiRz$xJdhx`V$VyXv02W-G2(BZl^u1GvD7uM0c7@>6EfdoXFSf5mULqX9`Cg`eyW|RvGuQ`5S!zrM zEpj=WK)Xug1Wn=4>2Hj!?2{G=r0|Ocx`Q|IQnu2TO*B>s*0XXK-}62tuy*!u&yfEB z_kcOr1LNRw$r9rZWhrc4=QQTrbA!E4?dO(-N>p<`8y8&(Ov(CSE*7-eiR8OhPD97SKXOP*=`JZ2 zGY?|LeDiqjw|`NT$@&1*QQW(0Z#0>@Tt~y%Y+A5!O5l`SI{VH;BbZ|w(uG_pH@U}FlAO7ZQZOhxZKp=$A@d4dLe#@2KYa4j`P^BY zIuUz%JrM9rtLlHi9*yhG@ZUnc5=W(XU*=Lz43u@wa?AOAczTJYVw84xq}a zNNX522oWb3XzCsBm}nsDndWT#kQl8IiO!OL@&lV@%s-34Z8Q_Aiy zmO=$XPw%XUNE$qb`B_)KaxTQn$zHwKS>!dP z&0LD03R?Ms1mo+Xivrm&89n+o=AgVLxs~)DkL?_;?f~3m;8k2S$_7j{+9Q)Lf&yp| zVKam2_+BkL&c%ouIa~7lBIou?V`N!&@JFu)t8KRE^0rrH76X2|DHrEC7hE|&otKgy zn*1oEb#@U7WhXh!Q=Vvl*N zhkB6y5=gjX;C#Qih=OCFWf;-d&?lp8fuqOJU0^wIn?oruP&>TxbKC_69`em}1Gnqm zkR!ZmQm-85Mmi~r>Sw-o(&wub^@DC!Y2~*{SO=;b6VTh+tPgsHkt1@=V6wC4Hs_q7 zlS=!tjEN)FDUJlD@tVZc9^=GWlHb8i0~fS4G*1B?^>YNCEiU}hi6>S15z??oLrl?V z%I{VaFKqEG*c~_QGeJ(D{&$ltqgb{dnbCtOZPWtDUV$Yg`l$}cVhV6$v2#{f8N#b&n@42u@*_cvE@haszLcg)KN*{k zQQxl1KrFfBxZh0L@Rb=0+1brom}3ARTsS@$V7F;?Q*OX3NT86qn!cpB*YaSAWZ&Z) zby}*NYFlm1-AASZxHgt+TZ-C}LELI2 zXsO_2j|V|cTV_eYU|OmzTH0d>_ub5=yn$HprNhCf8P@j|vu0fA+jrwUSlo*p&@g9j zpFusRrtH$5)cU#SXu@3*v)iFX0CI`1Um4cHtP0z)_orK!*SNO|9}?wH2=+L1)m|pS z^E$}ObKAwT;1+DvQiEz9Qw2&Olo5Bv>U8{0GZBKd-PQRBHFH}%7_rG$MiF>$6e=$i zEU5obl>iHj{sz6dcbqh`j?c_3_1aqzOF{iJY;j^0tcEI>=`r*N8v{dMN+n@Nt9Qns z=G$-zr7`6ynXtDLrVcU@mgm^f^W%)L;B{gjV#{e|b&9umxKckvh6QyA1bb;z8uiVj23H(6Z?W@9A;IX+i?qkGT`_N2WRxr98gsl*~{yZt5eS^pWTK#{Wv(m z8-IzG64X_lIDUA6qyD;J`Ay7`t1aQ56N@?H<|jKf`u>PdFOKB&c4ZSslk9v^_^-!f z7`2KRy$11Epu#=ZHOPs{?*T$V9XFnw)OY2*r5-KqWR=aMrF=*%?GL)XL)imPuboG@ zt3nf{15!|CzD<_$cO?xq7#--e$Hd%>HQEl!4USI~e_K19xzQ8ans& zPClM(kdI`&H)J;9Yp}ryG)mDKwY4Iyu~I5u&4$SYZgFiAl3hbj&ok72%dAsrr8&`F z0>z(77i^h?0G*dUOJh;hcr?nivx#Z%1G>wysAq$UaN4+O?g2caN6jGcVRCoSy#mQj 
zh8a%eEjEdBOHS_<~SXUQeQUXhMsB2d;q+Ycn1&5ae3E@n@My~X0q%i{$>Jz zt#oo}qg&57nh0#QQCW12Zv6PO#lXv*l+{FiNg^pQ&zjRYx0MRn9$6-4@ZNI4;Y_(V z^91ccI**5vQTCW1UekIwxun1_a7A$GAI&9E@utL%;g2Scb!Ng@I9nb2r_Q$%GkPLS zJf)wUy 开始,之后跟随一个或多个消息。 - 2. 每条消息以标签 <|start_header_id|> 开始,角色为 system、user 或 assistant,并以标签 <|end_header_id|> 结束。 - 3. 在双换行 \n\n 之后,消息的内容随之而来。每条消息的结尾由 <|eot_id|> 令牌标记。 + 1. 提示以特殊令牌 `<|begin_of_text|>` 开始,之后跟随一个或多个消息。 + 2. 每条消息以标签 `<|start_header_id|>` 开始,角色为 `system`、`user` 或 `assistant`,并以标签 `<|end_header_id|>` 结束。 + 3. 在双换行 `\n\n` 之后,消息的内容随之而来。每条消息的结尾由 `<|eot_id|>` 令牌标记。 - Ref: [ArtificialZeng/llama3_explained](https://github.com/ArtificialZeng/llama3_explained) ### 安装XTuner-0.1.18