Recent research has put forward the concept of Fog computing, which deports intelligence towards IoT networks. Fog clusters are meant to complement current Cloud deployments by providing compute and storage resources directly in the access network, which is particularly useful for low-latency applications. However, Fog deployments are expected to be less elastic than Cloud platforms, since the elasticity of the Cloud comes from the scale of its data centers. Thus, a Fog node dimensioned for the average traffic load of a given application will be unable to handle sudden bursts of traffic. In this paper, we explore such a use case, in which a latency-sensitive Fog-based application must offload some of its processing to the Cloud. We build an analytical queueing model for deriving the statistical response time of a Fog deployment under different request Load Balancing (LB) strategies, contrasting a naive scheme, an ideal one (LFU-LB, which assumes a priori knowledge of request popularity), and a practical one (LRU-LB, which learns request popularity online with an LRU filter). Using our model, and confirming the results through simulation, we show that LRU-LB achieves close-to-ideal performance, with substantial savings on Cloud offload cost with respect to a request-oblivious strategy in the explored scenarios.
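The core of the LRU-LB idea described above is a fixed-capacity LRU filter used as an online popularity detector. Below is a minimal Python sketch of that filtering idea only, under our own assumptions: the class name LRUFilter, the admit() method, and the route() helper are illustrative, not taken from the paper, and the paper's full scheme involves more than this hit/miss test. A request type that hits the filter is treated as popular and served in the Fog; a miss is offloaded to the Cloud while the filter learns the type.

    from collections import OrderedDict

    class LRUFilter:
        """Fixed-capacity LRU filter over request types (illustrative sketch).

        A hit suggests the request type has been seen recently, i.e. is
        popular, so the request is served in the Fog; on a miss the request
        is offloaded to the Cloud while the filter learns its type.
        """
        def __init__(self, capacity):
            self.capacity = capacity
            self.entries = OrderedDict()

        def admit(self, request_type):
            """Return True on a hit (serve in Fog), False on a miss (offload)."""
            if request_type in self.entries:
                self.entries.move_to_end(request_type)  # refresh recency
                return True
            self.entries[request_type] = True  # learn the (possibly new) type
            if len(self.entries) > self.capacity:
                self.entries.popitem(last=False)  # evict least recently used
            return False

    # Illustrative usage: route each incoming request.
    lru_lb = LRUFilter(capacity=1000)
    def route(request_type):
        return "fog" if lru_lb.admit(request_type) else "cloud"

The filter capacity acts as the popularity threshold: a larger filter admits more request types into the Fog, trading Fog load for Cloud offload cost.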
%0 Conference Paper
%1 Enguehard18ITC30
%A Enguehard, Marcel
%A Carofiglio, Giovanna
%A Rossi, Dario
%B 30th International Teletraffic Congress (ITC 30)
%C Vienna, Austria
%D 2018
%K Session_2:_Mobile_Edge_Computing itc itc30
%T A Popularity-Based Approach for Effective Cloud Offload in Fog Deployments
%U https://gitlab2.informatik.uni-wuerzburg.de/itc-conference/itc-conference-public/-/raw/master/itc30/Enguehard18ITC30.pdf?inline=true
%X Recent research has put forward the concept of Fog computing, which deports intelligence towards IoT networks. Fog clusters are meant to complement current Cloud deployments by providing compute and storage resources directly in the access network, which is particularly useful for low-latency applications. However, Fog deployments are expected to be less elastic than Cloud platforms, since the elasticity of the Cloud comes from the scale of its data centers. Thus, a Fog node dimensioned for the average traffic load of a given application will be unable to handle sudden bursts of traffic. In this paper, we explore such a use case, in which a latency-sensitive Fog-based application must offload some of its processing to the Cloud. We build an analytical queueing model for deriving the statistical response time of a Fog deployment under different request Load Balancing (LB) strategies, contrasting a naive scheme, an ideal one (LFU-LB, which assumes a priori knowledge of request popularity), and a practical one (LRU-LB, which learns request popularity online with an LRU filter). Using our model, and confirming the results through simulation, we show that LRU-LB achieves close-to-ideal performance, with substantial savings on Cloud offload cost with respect to a request-oblivious strategy in the explored scenarios.
@inproceedings{Enguehard18ITC30,
abstract = {Recent research has put forward the concept of Fog computing, which deports intelligence towards IoT networks. Fog clusters are meant to complement current Cloud deployments by providing compute and storage resources directly in the access network, which is particularly useful for low-latency applications. However, Fog deployments are expected to be less elastic than Cloud platforms, since the elasticity of the Cloud comes from the scale of its data centers. Thus, a Fog node dimensioned for the average traffic load of a given application will be unable to handle sudden bursts of traffic. In this paper, we explore such a use case, in which a latency-sensitive Fog-based application must offload some of its processing to the Cloud. We build an analytical queueing model for deriving the statistical response time of a Fog deployment under different request Load Balancing (LB) strategies, contrasting a naive scheme, an ideal one (LFU-LB, which assumes a priori knowledge of request popularity), and a practical one (LRU-LB, which learns request popularity online with an LRU filter). Using our model, and confirming the results through simulation, we show that LRU-LB achieves close-to-ideal performance, with substantial savings on Cloud offload cost with respect to a request-oblivious strategy in the explored scenarios.},
address = {Vienna, Austria},
author = {Enguehard, Marcel and Carofiglio, Giovanna and Rossi, Dario},
booktitle = {30th International Teletraffic Congress (ITC 30)},
keywords = {Session_2:_Mobile_Edge_Computing itc itc30},
title = {A Popularity-Based Approach for Effective Cloud Offload in Fog Deployments},
url = {https://gitlab2.informatik.uni-wuerzburg.de/itc-conference/itc-conference-public/-/raw/master/itc30/Enguehard18ITC30.pdf?inline=true},
year = 2018
}