diff --git a/evaluate/General_evaluation.md b/evaluate/General_evaluation.md
index 40cf399..fe5c43f 100644
--- a/evaluate/General_evaluation.md
+++ b/evaluate/General_evaluation.md
@@ -47,3 +47,4 @@ pip install torch transformers datasets nltk rouge jieba
 |----------|---------|---------|---------|---------|---------|---------|---------|
 | Qwen1_5-0_5B-Chat | 27.23% | 8.55% | 17.05% | 26.65% | 13.11% | 7.19% | 4.05% |
 | InternLM2_7B_chat | 37.86% | 15.23% | 24.34% | 39.71% | 22.66% | 14.26% | 9.21% |
+| InternLM2_7B_chat_full | 32.45% | 10.82% | 20.17% | 30.48% | 15.67% | 8.84% | 5.02% |
\ No newline at end of file
diff --git a/evaluate/InternLM2_7B_chat_eval.py b/evaluate/InternLM2_7B_chat_eval.py
index 457a6f9..3c59441 100644
--- a/evaluate/InternLM2_7B_chat_eval.py
+++ b/evaluate/InternLM2_7B_chat_eval.py
@@ -102,7 +102,7 @@ for batch in dataloader:
         verbose=False,
         errors='replace'
     ).replace("医生:","") for i in range(batch_size)]
-    hypotheses.extend([r.replace(stop_word," ").split()[0] for r in batch_response if stop_word in r])
+    hypotheses.extend([r.replace(stop_word," ").split()[0] if stop_word in r else r for r in batch_response])
 
 # Load metric
diff --git a/evaluate/README.md b/evaluate/README.md
index 2d34755..260f3f0 100644
--- a/evaluate/README.md
+++ b/evaluate/README.md
@@ -7,8 +7,8 @@
 | Model | ROUGE-1 | ROUGE-2 | ROUGE-L | BLEU-1 | BLEU-2 | BLEU-3 | BLEU-4 |
 |----------|---------|---------|---------|---------|---------|---------|---------|
 | Qwen1_5-0_5B-Chat | 27.23% | 8.55% | 17.05% | 26.65% | 13.11% | 7.19% | 4.05% |
-| InternLM2_7B_chat | 37.86% | 15.23% | 24.34% | 39.71% | 22.66% | 14.26% | 9.21% |
-
+| InternLM2_7B_chat_qlora | 37.86% | 15.23% | 24.34% | 39.71% | 22.66% | 14.26% | 9.21% |
+| InternLM2_7B_chat_full | 32.45% | 10.82% | 20.17% | 30.48% | 15.67% | 8.84% | 5.02% |
 ## 专业指标评测
 * 具体指标、方法见 [Professional_evaluation.md](./Professional_evaluation.md)
diff --git a/evaluate/README_EN.md b/evaluate/README_EN.md
index 58b1115..a202542 100644
--- a/evaluate/README_EN.md
+++ b/evaluate/README_EN.md
@@ -4,15 +4,11 @@
 * For specific metrics and methods, see [General_evaluation.md](./General_evaluation_EN.md)
 
-| Metric | Value |
-|---------|----------------------|
-| ROUGE-1 | 27.23% |
-| ROUGE-2 | 8.55% |
-| ROUGE-L | 17.05% |
-| BLEU-1 | 26.65% |
-| BLEU-2 | 13.11% |
-| BLEU-3 | 7.19% |
-| BLEU-4 | 4.05% |
+| Model | ROUGE-1 | ROUGE-2 | ROUGE-L | BLEU-1 | BLEU-2 | BLEU-3 | BLEU-4 |
+|----------|---------|---------|---------|---------|---------|---------|---------|
+| Qwen1_5-0_5B-Chat | 27.23% | 8.55% | 17.05% | 26.65% | 13.11% | 7.19% | 4.05% |
+| InternLM2_7B_chat | 37.86% | 15.23% | 24.34% | 39.71% | 22.66% | 14.26% | 9.21% |
+| InternLM2_7B_chat_full | 32.45% | 10.82% | 20.17% | 30.48% | 15.67% | 8.84% | 5.02% |
 
 ## Professional Metrics Evaluation
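
Note on the `InternLM2_7B_chat_eval.py` change: the original comprehension filtered with `if stop_word in r`, so any response that never emitted the stop word was silently dropped, leaving `hypotheses` shorter than the reference list and misaligning every subsequent ROUGE/BLEU pair. The fix keeps all responses and truncates at the stop word only when it occurs. A minimal sketch of the fixed logic, assuming `stop_word = "<eoa>"` (a placeholder here; use whatever stop token the script actually defines):

```python
# Sketch of the corrected hypothesis post-processing.
# "<eoa>" is an assumed stop token for illustration only.
stop_word = "<eoa>"

batch_response = [
    "很高兴见到你<eoa>多余的续写",  # contains the stop word: truncate at it
    "我能为你做些什么",             # no stop word: keep the response intact
]

hypotheses = []
# Every response now yields exactly one hypothesis, so hypotheses
# stays aligned one-to-one with the reference answers.
hypotheses.extend([
    r.replace(stop_word, " ").split()[0] if stop_word in r else r
    for r in batch_response
])
print(hypotheses)  # ['很高兴见到你', '我能为你做些什么']
```

One caveat on the truncation itself: `.replace(stop_word, " ").split()[0]` cuts at the first whitespace as well as at the stop word, which is harmless for Chinese responses containing no spaces but would over-truncate space-delimited text.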