BibTeX
@inproceedings{sarch-etal-2023-open,
title = "Open-Ended Instructable Embodied Agents with Memory-Augmented Large Language Models",
author = "Sarch, Gabriel and
Wu, Yue and
Tarr, Michael and
Fragkiadaki, Katerina",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-emnlp.226",
doi = "10.18653/v1/2023.findings-emnlp.226",
pages = "3468--3500",
abstract = "Pre-trained and frozen LLMs can effectively map simple scene re-arrangement instructions to programs over a robot{'}s visuomotor functions through appropriate few-shot example prompting. To parse open-domain natural language and adapt to a user{'}s idiosyncratic procedures, not known during prompt engineering time, fixed prompts fall short. In this paper, we introduce HELPER, an embodied agent equipped with an external memory of language-program pairs that parses free-form human-robot dialogue into action programs through retrieval-augmented LLM prompting: relevant memories are retrieved based on the current dialogue, instruction, correction or VLM description, and used as in-context prompt examples for LLM querying. The memory is expanded during deployment to include pairs of user{'}s language and action plans, to assist future inferences and personalize them to the user{'}s language and routines. HELPER sets a new state-of-the-art in the TEACh benchmark in both Execution from Dialog History (EDH) and Trajectory from Dialogue (TfD), with 1.7x improvement over the previous SOTA for TfD. Our models, code and video results can be found in our project{'}s website: https://helper-agent-llm.github.io.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sarch-etal-2023-open">
<titleInfo>
<title>Open-Ended Instructable Embodied Agents with Memory-Augmented Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Gabriel</namePart>
<namePart type="family">Sarch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yue</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Tarr</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Katerina</namePart>
<namePart type="family">Fragkiadaki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Pre-trained and frozen LLMs can effectively map simple scene re-arrangement instructions to programs over a robot’s visuomotor functions through appropriate few-shot example prompting. To parse open-domain natural language and adapt to a user’s idiosyncratic procedures, not known during prompt engineering time, fixed prompts fall short. In this paper, we introduce HELPER, an embodied agent equipped with an external memory of language-program pairs that parses free-form human-robot dialogue into action programs through retrieval-augmented LLM prompting: relevant memories are retrieved based on the current dialogue, instruction, correction or VLM description, and used as in-context prompt examples for LLM querying. The memory is expanded during deployment to include pairs of user’s language and action plans, to assist future inferences and personalize them to the user’s language and routines. HELPER sets a new state-of-the-art in the TEACh benchmark in both Execution from Dialog History (EDH) and Trajectory from Dialogue (TfD), with 1.7x improvement over the previous SOTA for TfD. Our models, code and video results can be found in our project’s website: https://helper-agent-llm.github.io.</abstract>
<identifier type="citekey">sarch-etal-2023-open</identifier>
<identifier type="doi">10.18653/v1/2023.findings-emnlp.226</identifier>
<location>
<url>https://aclanthology.org/2023.findings-emnlp.226</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>3468</start>
<end>3500</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Open-Ended Instructable Embodied Agents with Memory-Augmented Large Language Models
%A Sarch, Gabriel
%A Wu, Yue
%A Tarr, Michael
%A Fragkiadaki, Katerina
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F sarch-etal-2023-open
%X Pre-trained and frozen LLMs can effectively map simple scene re-arrangement instructions to programs over a robot’s visuomotor functions through appropriate few-shot example prompting. To parse open-domain natural language and adapt to a user’s idiosyncratic procedures, not known during prompt engineering time, fixed prompts fall short. In this paper, we introduce HELPER, an embodied agent equipped with an external memory of language-program pairs that parses free-form human-robot dialogue into action programs through retrieval-augmented LLM prompting: relevant memories are retrieved based on the current dialogue, instruction, correction or VLM description, and used as in-context prompt examples for LLM querying. The memory is expanded during deployment to include pairs of user’s language and action plans, to assist future inferences and personalize them to the user’s language and routines. HELPER sets a new state-of-the-art in the TEACh benchmark in both Execution from Dialog History (EDH) and Trajectory from Dialogue (TfD), with 1.7x improvement over the previous SOTA for TfD. Our models, code and video results can be found in our project’s website: https://helper-agent-llm.github.io.
%R 10.18653/v1/2023.findings-emnlp.226
%U https://aclanthology.org/2023.findings-emnlp.226
%U https://doi.org/10.18653/v1/2023.findings-emnlp.226
%P 3468-3500
Markdown (Informal)
[Open-Ended Instructable Embodied Agents with Memory-Augmented Large Language Models](https://aclanthology.org/2023.findings-emnlp.226) (Sarch et al., Findings 2023)
ACL
Gabriel Sarch, Yue Wu, Michael Tarr, and Katerina Fragkiadaki. 2023. Open-Ended Instructable Embodied Agents with Memory-Augmented Large Language Models. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 3468–3500, Singapore. Association for Computational Linguistics.
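
The abstract above describes HELPER's core mechanism: retrieval-augmented LLM prompting over an external memory of language-program pairs that is expanded during deployment. Below is a minimal, hypothetical Python sketch of that loop under stated assumptions; every name here (MemoryEntry, embed, query_llm, the toy hash embedding) is illustrative and not taken from HELPER's released code at https://helper-agent-llm.github.io.

```python
# Hypothetical sketch of retrieval-augmented prompting with a growing
# memory of (language, program) pairs, as described in the abstract.
from dataclasses import dataclass


@dataclass
class MemoryEntry:
    language: str            # user dialogue, instruction, or correction
    program: str             # action program paired with that language
    embedding: list[float]   # vector used for similarity search


def embed(text: str) -> list[float]:
    # Toy stand-in for a real text encoder, so the sketch runs offline.
    vec = [float(ord(c) % 7) for c in text[:32]]
    return vec + [0.0] * (32 - len(vec))


def cosine(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    na = sum(x * x for x in a) ** 0.5
    nb = sum(y * y for y in b) ** 0.5
    return dot / (na * nb) if na and nb else 0.0


def retrieve(memory: list[MemoryEntry], query: str, k: int = 3) -> list[MemoryEntry]:
    # Pick the k stored pairs whose language is most similar to the query.
    q = embed(query)
    return sorted(memory, key=lambda m: cosine(q, m.embedding), reverse=True)[:k]


def build_prompt(examples: list[MemoryEntry], dialogue: str) -> str:
    # Retrieved pairs become in-context examples ahead of the new input.
    shots = "\n\n".join(
        f"Dialogue: {m.language}\nProgram: {m.program}" for m in examples
    )
    return f"{shots}\n\nDialogue: {dialogue}\nProgram:"


def parse_to_program(memory: list[MemoryEntry], dialogue: str, query_llm) -> str:
    program = query_llm(build_prompt(retrieve(memory, dialogue), dialogue))
    # Deployment-time expansion: write the new pair back so later queries
    # can retrieve it, personalizing the agent to this user's phrasing.
    memory.append(MemoryEntry(dialogue, program, embed(dialogue)))
    return program
```

A caller would seed the memory with curated few-shot pairs, pass a real LLM client as query_llm, and replace the toy embed with an actual text encoder; the write-back at the end of parse_to_program is what lets later retrievals reflect the user's own language and routines.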