Large Language Models (LLMs) have shown impressive performance as general purpose agents, but their abilities remain highly dependent on prompts which are hand written with onerous trial-and-error effort. We propose a simple and nonparametric solution to this problem, Prompt Optimization with Textual Gradients (ProTeGi), which is inspired by numerical gradient descent to automatically improve prompts, assuming access to training data and an LLM API. The algorithm uses minibatches of data to form natural language "gradients" that criticize the current prompt, much like how numerical gradients point in the direction of error ascent. The natural language gradients are then "propagated" into the prompt by editing the prompt in the opposite semantic direction of the gradient. These gradient descent steps are guided by a beam search and bandit selection procedure which significantly improves algorithmic efficiency. Preliminary results across three benchmark NLP tasks and the novel problem of LLM jailbreak detection suggest that Automatic Prompt Optimization can outperform prior prompt editing techniques and improve an initial prompt's performance by up to 31%, by using data to rewrite vague task descriptions into more precise annotation instructions.
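The loop the abstract describes is concrete enough to sketch. Below is a minimal Python illustration of one optimization step, with loud assumptions: the `llm` callable, the prompt templates, and the greedy final selection are hypothetical stand-ins (the paper itself uses beam search with bandit-based candidate selection), so treat this as a reading aid rather than the authors' implementation.

import random
from typing import Callable

Example = tuple[str, str]  # (input text, gold label)

def accuracy(llm: Callable[[str], str], prompt: str, data: list[Example]) -> float:
    """Fraction of examples the prompt labels correctly."""
    hits = sum(llm(f"{prompt}\n\nInput: {x}\nLabel:").strip() == y for x, y in data)
    return hits / len(data)

def protegi_step(llm: Callable[[str], str], prompt: str,
                 train: list[Example], batch_size: int = 8) -> str:
    """One textual 'gradient descent' step: criticize the prompt, then edit it."""
    batch = random.sample(train, min(batch_size, len(train)))
    errors = [(x, y) for x, y in batch
              if llm(f"{prompt}\n\nInput: {x}\nLabel:").strip() != y]
    if not errors:
        return prompt  # no error signal from this minibatch
    # Textual "gradient": natural-language criticism of the current prompt.
    gradient = llm(f"Prompt: {prompt}\nIt mislabeled these examples: {errors}\n"
                   "Describe what is wrong with the prompt.")
    # "Propagate" the gradient: edit the prompt against the criticism.
    candidate = llm(f"Prompt: {prompt}\nCritique: {gradient}\n"
                    "Rewrite the prompt to fix these problems.")
    # Greedy stand-in for the paper's beam search + bandit selection.
    return max(prompt, candidate, key=lambda p: accuracy(llm, p, batch))

Iterating protegi_step from a vague seed instruction is, in miniature, how the method turns loose task descriptions into more precise annotation instructions.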
@inproceedings{pryzant-etal-2023-automatic,
abstract = {Large Language Models (LLMs) have shown impressive performance as general purpose agents, but their abilities remain highly dependent on prompts which are hand written with onerous trial-and-error effort. We propose a simple and nonparametric solution to this problem, Prompt Optimization with Textual Gradients (ProTeGi), which is inspired by numerical gradient descent to automatically improve prompts, assuming access to training data and an LLM API. The algorithm uses minibatches of data to form natural language {``}gradients{''} that criticize the current prompt, much like how numerical gradients point in the direction of error ascent. The natural language gradients are then {``}propagated{''} into the prompt by editing the prompt in the opposite semantic direction of the gradient. These gradient descent steps are guided by a beam search and bandit selection procedure which significantly improves algorithmic efficiency. Preliminary results across three benchmark NLP tasks and the novel problem of LLM jailbreak detection suggest that Automatic Prompt Optimization can outperform prior prompt editing techniques and improve an initial prompt{'}s performance by up to 31{\%}, by using data to rewrite vague task descriptions into more precise annotation instructions.},
address = {Singapore},
author = {Pryzant, Reid and Iter, Dan and Li, Jerry and Lee, Yin Tat and Zhu, Chenguang and Zeng, Michael},
booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing},
doi = {10.18653/v1/2023.emnlp-main.494},
editor = {Bouamor, Houda and Pino, Juan and Bali, Kalika},
month = dec,
pages = {7957--7968},
publisher = {Association for Computational Linguistics},
title = {Automatic Prompt Optimization with {``}Gradient Descent{''} and Beam Search},
url = {https://aclanthology.org/2023.emnlp-main.494},
year = 2023
}