Cite this publication
%0 Journal Article
%1 journals/corr/abs-2406-12016
%A Son, Seungwoo
%A Park, Wonpyo
%A Han, Woohyun
%A Kim, Kyuyeun
%A Lee, Jaeho
%D 2024
%J CoRR
%K dblp
%T Prefixing Attention Sinks can Mitigate Activation Outliers for Large Language Model Quantization.
%U http://dblp.uni-trier.de/db/journals/corr/corr2406.html#abs-2406-12016
%V abs/2406.12016
@article{journals/corr/abs-2406-12016,
added-at = {2024-07-19T00:00:00.000+0200},
author = {Son, Seungwoo and Park, Wonpyo and Han, Woohyun and Kim, Kyuyeun and Lee, Jaeho},
biburl = {https://www.bibsonomy.org/bibtex/26e6ce976a5be253f1f68ece2be4e5498/dblp},
ee = {https://doi.org/10.48550/arXiv.2406.12016},
interhash = {7f40b6101d98d8b64cbcd712e2975854},
intrahash = {6e6ce976a5be253f1f68ece2be4e5498},
journal = {CoRR},
keywords = {dblp},
timestamp = {2024-07-22T07:11:37.000+0200},
title = {Prefixing Attention Sinks can Mitigate Activation Outliers for Large Language Model Quantization.},
url = {http://dblp.uni-trier.de/db/journals/corr/corr2406.html#abs-2406-12016},
volume = {abs/2406.12016},
year = 2024
}