Ensuring the reliability of critical industrial systems across various sectors is crucial.
It is essential to detect deviations from regular behaviour to mitigate disruptions and
preserve infrastructure integrity. However, accurately labelling anomaly datasets is
challenging due to the rarity of anomalies and the subjectivity of manual annotation.
The conventional approach of training separate models for each dataset entity further
complicates model development. This paper presents a novel Multi-task Learning framework
combining an LSTM Autoencoder with a temporal attention mechanism (MTL-LATAM) for
effective time series anomaly detection. Multi-task learning models improve adaptability
and generalizability, reducing runtime and compute requirements while supporting
zero-shot evaluation. These models offer flexibility in detecting emerging anomalies.
Additionally, we introduce a dynamic thresholding mechanism that incorporates temporal
context into anomaly detection and provide visualizations of attention weights to enhance
interpretability. The study compares MTL-LATAM with other multi-task models, evaluates
multi-task versus single-task models, and assesses the performance of the proposed
framework in zero-shot learning scenarios. The findings indicate MTL-LATAM's
effectiveness across real-world and open-source datasets, achieving 95% and 97% task
synergy. The results underscore the superior performance of multi-task models in
zero-shot tasks compared to individual models trained exclusively on their respective
datasets.
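
To make the architecture described in the abstract concrete, the following is a minimal
illustrative sketch (not the authors' code) of an LSTM autoencoder whose decoder attends
over the encoder's hidden states with a simple additive temporal attention. All class
names, dimensions, and the single-layer layout are assumptions for illustration only; the
returned attention weights are the kind of quantity the paper visualizes for
interpretability.

```python
# Hypothetical sketch of an LSTM autoencoder with temporal attention (PyTorch).
# Not the MTL-LATAM implementation; names and dimensions are illustrative assumptions.
import torch
import torch.nn as nn


class TemporalAttention(nn.Module):
    """Scores each encoder time step against the current decoder state."""

    def __init__(self, hidden_dim):
        super().__init__()
        self.score = nn.Linear(2 * hidden_dim, 1)

    def forward(self, decoder_state, encoder_outputs):
        # decoder_state: (batch, hidden); encoder_outputs: (batch, time, hidden)
        time_steps = encoder_outputs.size(1)
        expanded = decoder_state.unsqueeze(1).expand(-1, time_steps, -1)
        weights = torch.softmax(
            self.score(torch.cat([expanded, encoder_outputs], dim=-1)).squeeze(-1),
            dim=1,
        )  # (batch, time): the temporal attention weights one could visualize
        context = torch.bmm(weights.unsqueeze(1), encoder_outputs).squeeze(1)
        return context, weights


class LSTMAutoencoderWithAttention(nn.Module):
    def __init__(self, n_features, hidden_dim=64):
        super().__init__()
        self.encoder = nn.LSTM(n_features, hidden_dim, batch_first=True)
        self.decoder_cell = nn.LSTMCell(n_features + hidden_dim, hidden_dim)
        self.attention = TemporalAttention(hidden_dim)
        self.output = nn.Linear(hidden_dim, n_features)

    def forward(self, x):
        # x: (batch, time, n_features); reconstruct the window step by step.
        enc_out, (h, c) = self.encoder(x)
        h, c = h.squeeze(0), c.squeeze(0)
        recon, attn_maps = [], []
        step_input = torch.zeros(x.size(0), x.size(2), device=x.device)
        for _ in range(x.size(1)):
            context, weights = self.attention(h, enc_out)
            h, c = self.decoder_cell(torch.cat([step_input, context], dim=-1), (h, c))
            step_input = self.output(h)
            recon.append(step_input)
            attn_maps.append(weights)
        return torch.stack(recon, dim=1), torch.stack(attn_maps, dim=1)
```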
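
The abstract also mentions a dynamic thresholding mechanism that uses temporal context.
The paper does not spell out the rule here, so the snippet below shows one plausible
instantiation only, as an assumption: a point is flagged as anomalous when its
reconstruction error exceeds the rolling mean plus k standard deviations of the errors in
a recent window. The function name and parameters are hypothetical.

```python
# Hypothetical dynamic threshold over reconstruction errors (assumption, not the
# paper's rule): compare each error to statistics of a recent window of errors.
import numpy as np


def dynamic_threshold_flags(errors, window=100, k=3.0):
    errors = np.asarray(errors, dtype=float)
    flags = np.zeros(len(errors), dtype=bool)
    for t in range(len(errors)):
        history = errors[max(0, t - window):t]
        if len(history) < 2:  # not enough temporal context yet
            continue
        threshold = history.mean() + k * history.std()
        flags[t] = errors[t] > threshold
    return flags
```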
@inproceedings{nivarthi2024multi,
abstract = {Ensuring the reliability of critical industrial systems across various sectors is crucial. It is essential to detect deviations from regular behaviour to mitigate disruptions and preserve infrastructure integrity. However, accurately labelling anomaly datasets is challenging due to the rarity of anomalies and the subjectivity of manual annotation. The conventional approach of training separate models for each dataset entity further complicates model development. This paper presents a novel Multi-task Learning framework combining an LSTM Autoencoder with a temporal attention mechanism (MTL-LATAM) for effective time series anomaly detection. Multi-task learning models improve adaptability and generalizability, reducing runtime and compute requirements while supporting zero-shot evaluation. These models offer flexibility in detecting emerging anomalies. Additionally, we introduce a dynamic thresholding mechanism that incorporates temporal context into anomaly detection and provide visualizations of attention weights to enhance interpretability. The study compares MTL-LATAM with other multi-task models, evaluates multi-task versus single-task models, and assesses the performance of the proposed framework in zero-shot learning scenarios. The findings indicate MTL-LATAM's effectiveness across real-world and open-source datasets, achieving 95% and 97% task synergy. The results underscore the superior performance of multi-task models in zero-shot tasks compared to individual models trained exclusively on their respective datasets.},
added-at = {2024-09-17T10:26:16.000+0200},
author = {Nivarthi, Chandana Priya and Huang, Zhixin and Gruhl, Christian and Sick, Bernhard},
biburl = {https://www.bibsonomy.org/bibtex/22e6c89baaff1de49cf4a05ebcac6c635/ies},
booktitle = {International Joint Conference on Neural Networks (IJCNN)},
doi = {10.1109/IJCNN60899.2024.10651344},
interhash = {0a066d9e552190523535881a0c9f1654},
intrahash = {2e6c89baaff1de49cf4a05ebcac6c635},
keywords = {imported itegpub isac-www},
pages = {1--10},
publisher = {IEEE},
timestamp = {2024-09-17T10:26:16.000+0200},
title = {Multi-Task Representation Learning with Temporal Attention for Zero-Shot Time Series Anomaly Detection},
year = 2024
}