The quality of texts generated by natural language generation (NLG) systems
is hard to measure automatically. Conventional reference-based metrics, such as
BLEU and ROUGE, have been shown to have relatively low correlation with human
judgments, especially for tasks that require creativity and diversity. Recent
studies suggest using large language models (LLMs) as reference-free metrics
for NLG evaluation, which have the benefit of being applicable to new tasks
that lack human references. However, these LLM-based evaluators still show
lower agreement with human judgments than medium-sized neural evaluators. In
this work, we present G-Eval, a framework for using large language models with
chain-of-thought (CoT) prompting and a form-filling paradigm to assess the
quality of NLG outputs. We experiment with two generation tasks, text
summarization and dialogue generation, and show that G-Eval with GPT-4 as the
backbone model achieves a Spearman correlation of 0.514 with human judgments
on the summarization task, outperforming all previous methods by a large
margin. We also provide a preliminary analysis of the behavior of LLM-based
evaluators and highlight a potential issue: they may have a bias towards
LLM-generated texts. The code is available at https://github.com/nlpyang/geval
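
To make the framework concrete, below is a minimal, hypothetical sketch of a
G-Eval-style scoring loop in Python. It is not the implementation from the
repository above: the prompt wording, the single coherence criterion, the 1-5
score range, and the call_llm placeholder are assumptions made purely for
illustration; only the final spearmanr call (SciPy) corresponds to the
meta-evaluation statistic quoted in the abstract.

    # Minimal sketch of a G-Eval-style evaluator (NOT the official
    # implementation from the repository above). Prompt wording, the single
    # "coherence" criterion, the 1-5 score range, and call_llm are assumptions.
    from scipy.stats import spearmanr


    def build_geval_prompt(source: str, summary: str) -> str:
        # Task definition, evaluation criterion, chain-of-thought style steps,
        # and a final form-filling request for a single numeric score.
        return (
            "You will be given a news article and a candidate summary.\n"
            "Your task is to rate the summary on coherence (1-5).\n\n"
            "Evaluation steps:\n"
            "1. Read the article and identify its main points.\n"
            "2. Check whether the summary presents them in a clear, logical order.\n"
            "3. Assign a coherence score from 1 (lowest) to 5 (highest).\n\n"
            f"Article:\n{source}\n\nSummary:\n{summary}\n\n"
            "Coherence score (1-5):"
        )


    def call_llm(prompt: str) -> str:
        # Placeholder for a chat-completion call to GPT-4 or another backbone
        # model; plug in your own client here.
        raise NotImplementedError


    def geval_score(source: str, summary: str) -> float:
        # Send the prompt and parse the first in-range digit the model returns.
        reply = call_llm(build_geval_prompt(source, summary))
        for ch in reply:
            if ch.isdigit() and 1 <= int(ch) <= 5:
                return float(ch)
        return float("nan")


    def meta_evaluate(auto_scores, human_scores):
        # Meta-evaluation: rank correlation between automatic scores and human
        # judgments, i.e. the Spearman correlation reported in the abstract.
        rho, _p_value = spearmanr(auto_scores, human_scores)
        return rho

The form-filling step is what keeps the output machine-readable: the model is
asked to fill in a single numeric field after working through the evaluation
steps, rather than to produce free-form commentary that would need further
parsing.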
@misc{liu2023geval,
author = {Liu, Yang and Iter, Dan and Xu, Yichong and Wang, Shuohang and Xu, Ruochen and Zhu, Chenguang},
keywords = {generative_evaluation},
note = {arXiv:2303.16634},
title = {G-Eval: NLG Evaluation using GPT-4 with Better Human Alignment},
url = {http://arxiv.org/abs/2303.16634},
year = 2023
}