From 74f07dd43cb425cb90c5009dfe37214c18b7a7e3 Mon Sep 17 00:00:00 2001
From: HongCheng
Date: Sat, 20 Apr 2024 01:28:36 +0900
Subject: [PATCH] update download and model.generate

---
 .../README_llama3_8b_instruct_qlora_alpaca_e3_M.md | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md b/xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md
index aef0b60..adb1a59 100644
--- a/xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md
+++ b/xtuner_config/README_llama3_8b_instruct_qlora_alpaca_e3_M.md
@@ -269,13 +269,14 @@ from modelscope import snapshot_download
 import os
 
 # download model in openxlab
-download(model_repo='ajupyter/EmoLLM_internlm2_7b_full',
-         output='ajupyter/EmoLLM_internlm2_7b_full')
-model_name_or_path = 'ajupyter/EmoLLM_internlm2_7b_full'
+# download(model_repo='MrCat/Meta-Llama-3-8B-Instruct',
+#          output='MrCat/Meta-Llama-3-8B-Instruct')
+# model_name_or_path = 'MrCat/Meta-Llama-3-8B-Instruct'
 
 # # download model in modelscope
-# model_name_or_path = snapshot_download('ajupyter/EmoLLM_internlm2_7b_full',
-#                                        cache_dir='EmoLLM_internlm2_7b_full')
+# model_name_or_path = snapshot_download('LLM-Research/Meta-Llama-3-8B-Instruct',
+#                                        cache_dir='LLM-Research/Meta-Llama-3-8B-Instruct')
+
 # offline model
 model_name_or_path = "/root/EmoLLM/xtuner_config/merged_Llama"
 
@@ -295,7 +296,7 @@ while True:
     input_text.replace(' ', '')
     if input_text == "exit":
         break
-    response, history = model.chat(tokenizer, input_text, history=messages)
+    response, history = model.generate(tokenizer, input_text, history=messages)
     messages.append((input_text, response))
     print(f"robot >>> {response}")
 ```
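For context on the `model.generate` change: on a Hugging Face `transformers` causal LM, `generate` takes token IDs, not `(tokenizer, text, history=...)` — that signature belongs to InternLM's custom `chat` helper. Below is a minimal sketch of the same chat loop written against the standard `transformers` API. The offline model path is taken from the patch; the prompt formatting via `apply_chat_template`, the sampling settings, and the `<|eot_id|>` terminator follow the public Llama-3 usage pattern and are assumptions, not part of this patch.

```python
# Sketch only (not part of the patch): chat loop for a merged
# Llama-3-8B-Instruct model using the standard transformers API.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# offline model path carried over from the patch
model_name_or_path = "/root/EmoLLM/xtuner_config/merged_Llama"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path, torch_dtype=torch.bfloat16, device_map="auto"
)
model.eval()

# Llama-3 emits <|eot_id|> at the end of each turn; stop on it as well
# as on the regular EOS token (per the public Llama-3 usage pattern).
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

messages = []  # chat history as role/content dicts
while True:
    input_text = input("User  >>> ").strip()
    if input_text == "exit":
        break
    messages.append({"role": "user", "content": input_text})
    # apply_chat_template renders the full history into Llama-3's prompt format
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    output_ids = model.generate(
        input_ids,
        max_new_tokens=512,
        eos_token_id=terminators,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )
    # decode only the newly generated tokens, not the echoed prompt
    response = tokenizer.decode(
        output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True
    )
    messages.append({"role": "assistant", "content": response})
    print(f"robot >>> {response}")
```

Keeping the history as role/content dicts (rather than `(input, response)` tuples) is what lets `apply_chat_template` rebuild the multi-turn prompt on each iteration.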