We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from instances containing specific details. Using the concepts and principles to guide the reasoning steps, LLMs significantly improve their abilities in following a correct reasoning path towards the solution. We conduct experiments of Step-Back Prompting with PaLM-2L models and observe substantial performance gains on a wide range of challenging reasoning-intensive tasks including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU Physics and Chemistry by 7% and 11%, TimeQA by 27%, and MuSiQue by 7%.
%0 Generic
%1 zheng2023step
%A Zheng, Huaixiu Steven
%A Mishra, Swaroop
%A Chen, Xinyun
%A Cheng, Heng-Tze
%A Chi, Ed H.
%A Le, Quoc V.
%A Zhou, Denny
%D 2023
%K Google deep-mind llms prompt-engineering step-back-prompting
%T Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models
%U https://arxiv.org/pdf/2310.06117.pdf
%X We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from instances containing specific details. Using the concepts and principles to guide the reasoning steps, LLMs significantly improve their abilities in following a correct reasoning path towards the solution. We conduct experiments of Step-Back Prompting with PaLM-2L models and observe substantial performance gains on a wide range of challenging reasoning-intensive tasks including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU Physics and Chemistry by 7% and 11%, TimeQA by 27%, and MuSiQue by 7%.
@misc{zheng2023step,
  abstract      = {We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from instances containing specific details. Using the concepts and principles to guide the reasoning steps, LLMs significantly improve their abilities in following a correct reasoning path towards the solution. We conduct experiments of Step-Back Prompting with PaLM-2L models and observe substantial performance gains on a wide range of challenging reasoning-intensive tasks including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU Physics and Chemistry by 7% and 11%, TimeQA by 27%, and MuSiQue by 7%.},
  added-at      = {2023-11-27T23:41:54.000+0100},
  archiveprefix = {arXiv},
  author        = {Zheng, Huaixiu Steven and Mishra, Swaroop and Chen, Xinyun and Cheng, Heng-Tze and Chi, Ed H. and Le, Quoc V. and Zhou, Denny},
  biburl        = {https://www.bibsonomy.org/bibtex/21635492f4be91b1f17e7246c20ebe81b/ghagerer},
  doi           = {10.48550/arXiv.2310.06117},
  eprint        = {2310.06117},
  interhash     = {6967f98b06dc602e7d5ba7e07e131ab4},
  intrahash     = {1635492f4be91b1f17e7246c20ebe81b},
  keywords      = {Google deep-mind llms prompt-engineering step-back-prompting},
  primaryclass  = {cs.LG},
  timestamp     = {2023-11-27T23:42:53.000+0100},
  title         = {Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models},
  url           = {https://arxiv.org/abs/2310.06117},
  year          = {2023},
}