@inbook{e976983075b14da7b3f386a05f88a5a0,
  title     = {Latent Trees for Compositional Generalization},
  author    = {Herzig, Jonathan and Berant, Jonathan and Bogin, Ben},
  editor    = {Hitzler, Pascal and Eberhart, Aaron and Sarker, {Md Kamruzzaman}},
  booktitle = {Compendium of Neurosymbolic Artificial Intelligence},
  series    = {Frontiers in Artificial Intelligence and Applications},
  publisher = {IOS Press BV},
  address   = {Amsterdam, The Netherlands},
  year      = {2023},
  month     = jul,
  day       = {21},
  pages     = {631--664},
  doi       = {10.3233/FAIA230161},
  language  = {English},
  abstract  = {Despite the success of neural networks in many natural language processing tasks, recent work has shown that they often fail in compositional generalization, i.e., the ability to generalize to new structures built from components observed during training. In this chapter, we posit that this behavior, in standard architectures such as LSTMs and Transformers, stems from the fact that fragments on the output side are not explicitly tied to fragments on the input side. To address this, we introduce models that explicitly construct latent trees over the input, which are used to compositionally compute representations necessary for predicting the output. We show the compositional generalization abilities of our models exceed the abilities of pre-trained Transformer models on several datasets for both semantic parsing and grounded question answering.},
  note      = {Publisher Copyright: {\textcopyright} 2023 The authors and IOS Press. All rights reserved.},
}