Selecting a well-performing algorithm for a given task or dataset can be time-consuming and tedious, but is crucial for the successful day-to-day business of developing new AI & ML applications. Algorithm Selection (AS) mitigates this through a meta-model leveraging meta-information about previous tasks. However, most of the available AS methods are error-prone because they characterize a task by either cheap-to-compute properties of the dataset or evaluations of cheap proxy algorithms, called landmarks. In this work, we extend the classical AS data setup to include multi-fidelity information and empirically demonstrate how meta-learning on algorithms’ learning behaviour allows us to exploit cheap test-time evidence effectively and combat myopia significantly. We further postulate a budget-regret trade-off w.r.t. the selection process. Our new selector MASIF is able to jointly interpret online evidence on a task in form of varying-length learning curves without any parametric assumption by leveraging a transformer-based encoder. This opens up new possibilities for guided rapid prototyping in data science on cheaply observed partial learning curves.
%0 Report
%1 91a29f974fce4959967ea6759e1075f4
%A Ruhkopf, Tim
%A Mohan, Aditya
%A Deng, Difan
%A Tornede, Alexander
%A Hutter, Frank
%A Lindauer, Marius
%D 2022
%K automl leibnizailab myown
%T MASIF: Meta-learned Algorithm Selection using Implicit Fidelity Information
%X Selecting a well-performing algorithm for a given task or dataset can be time-consuming and tedious, but is crucial for the successful day-to-day business of developing new AI & ML applications. Algorithm Selection (AS) mitigates this through a meta-model leveraging meta-information about previous tasks. However, most of the available AS methods are error-prone because they characterize a task by either cheap-to-compute properties of the dataset or evaluations of cheap proxy algorithms, called landmarks. In this work, we extend the classical AS data setup to include multi-fidelity information and empirically demonstrate how meta-learning on algorithms’ learning behaviour allows us to exploit cheap test-time evidence effectively and combat myopia significantly. We further postulate a budget-regret trade-off w.r.t. the selection process. Our new selector MASIF is able to jointly interpret online evidence on a task in form of varying-length learning curves without any parametric assumption by leveraging a transformer-based encoder. This opens up new possibilities for guided rapid prototyping in data science on cheaply observed partial learning curves.
@techreport{91a29f974fce4959967ea6759e1075f4,
  abstract      = {Selecting a well-performing algorithm for a given task or dataset can be time-consuming and tedious, but is crucial for the successful day-to-day business of developing new AI \& ML applications. Algorithm Selection (AS) mitigates this through a meta-model leveraging meta-information about previous tasks. However, most of the available AS methods are error-prone because they characterize a task by either cheap-to-compute properties of the dataset or evaluations of cheap proxy algorithms, called landmarks. In this work, we extend the classical AS data setup to include multi-fidelity information and empirically demonstrate how meta-learning on algorithms{\textquoteright} learning behaviour allows us to exploit cheap test-time evidence effectively and combat myopia significantly. We further postulate a budget-regret trade-off w.r.t. the selection process. Our new selector MASIF is able to jointly interpret online evidence on a task in form of varying-length learning curves without any parametric assumption by leveraging a transformer-based encoder. This opens up new possibilities for guided rapid prototyping in data science on cheaply observed partial learning curves.},
  added-at      = {2023-03-16T10:39:51.000+0100},
  author        = {Ruhkopf, Tim and Mohan, Aditya and Deng, Difan and Tornede, Alexander and Hutter, Frank and Lindauer, Marius},
  biburl        = {https://www.bibsonomy.org/bibtex/2814039431031e3cdeee24cdc30d51257/ail3s},
  day           = 2,
  interhash     = {9c548e868169ed2102ac14bfe6d54f05},
  intrahash     = {814039431031e3cdeee24cdc30d51257},
  keywords      = {automl leibnizailab myown},
  language      = {English},
  month         = dec,
  timestamp     = {2023-03-17T08:40:57.000+0100},
  title         = {{MASIF}: Meta-learned Algorithm Selection using Implicit Fidelity Information},
  type          = {Working Paper},
  internal-note = {NOTE(review): @techreport requires an institution field, which is missing here -- presumably Leibniz University Hannover; confirm with the authors' affiliation before adding},
  year          = 2022
}