2020 |
|
2. | Perera, Malinga R; Oetomo, Bastian; Rubinstein, Benjamin I P; Borovica-Gajic, Renata DBA bandits: Self-driving index tuning under ad-hoc, analytical workloads with safety guarantees Unpublished 2020.
@unpublished{perera2020dba,
  title         = {{DBA} bandits: Self-driving index tuning under ad-hoc, analytical workloads with safety guarantees},
  author        = {Perera, Malinga R. and Oetomo, Bastian and Rubinstein, Benjamin I. P. and Borovica-Gajic, Renata},
  url           = {https://arxiv.org/abs/2010.09208},
  eprint        = {2010.09208},
  archiveprefix = {arXiv},
  year          = {2020},
  note          = {arXiv preprint arXiv:2010.09208},
  abstract      = {Automating physical database design has remained a long-term interest in database research due to substantial performance gains afforded by optimised structures. Despite significant progress, a majority of today's commercial solutions are highly manual, requiring offline invocation by database administrators (DBAs) who are expected to identify and supply representative training workloads. Unfortunately, the latest advancements like query stores provide only limited support for dynamic environments. This status quo is untenable: identifying representative static workloads is no longer realistic; and physical design tools remain susceptible to the query optimiser's cost misestimates (stemming from unrealistic assumptions such as attribute value independence and uniformity of data distribution). We propose a self-driving approach to online index selection that eschews the DBA and query optimiser, and instead learns the benefits of viable structures through strategic exploration and direct performance observation. We view the problem as one of sequential decision making under uncertainty, specifically within the bandit learning setting. Multi-armed bandits balance exploration and exploitation to provably guarantee average performance that converges to a fixed policy that is optimal with perfect hindsight. Our comprehensive empirical results demonstrate up to 75% speed-up on shifting and ad-hoc workloads and 28% speed-up on static workloads compared against a state-of-the-art commercial tuning tool.},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {unpublished}
}
Automating physical database design has remained a long-term interest in database research due to substantial performance gains afforded by optimised structures. Despite significant progress, a majority of today's commercial solutions are highly manual, requiring offline invocation by database administrators (DBAs) who are expected to identify and supply representative training workloads. Unfortunately, the latest advancements like query stores provide only limited support for dynamic environments. This status quo is untenable: identifying representative static workloads is no longer realistic; and physical design tools remain susceptible to the query optimiser's cost misestimates (stemming from unrealistic assumptions such as attribute value independence and uniformity of data distribution). We propose a self-driving approach to online index selection that eschews the DBA and query optimiser, and instead learns the benefits of viable structures through strategic exploration and direct performance observation. We view the problem as one of sequential decision making under uncertainty, specifically within the bandit learning setting. Multi-armed bandits balance exploration and exploitation to provably guarantee average performance that converges to a fixed policy that is optimal with perfect hindsight. Our comprehensive empirical results demonstrate up to 75% speed-up on shifting and ad-hoc workloads and 28% speed-up on static workloads compared against a state-of-the-art commercial tuning tool. |
2019 |
|
1. | Oetomo, Bastian; Perera, Malinga; Borovica-Gajic, Renata; Rubinstein, Benjamin I P A Note on Bounding Regret of the C2UCB Contextual Combinatorial Bandit Unpublished 2019.
@unpublished{DBLP:journals/corr/abs-1902-07500,
  title         = {A Note on Bounding Regret of the {C2UCB} Contextual Combinatorial Bandit},
  author        = {Oetomo, Bastian and Perera, Malinga and Borovica-Gajic, Renata and Rubinstein, Benjamin I. P.},
  url           = {https://arxiv.org/abs/1902.07500},
  eprint        = {1902.07500},
  archiveprefix = {arXiv},
  year          = {2019},
  journal       = {CoRR},
  volume        = {abs/1902.07500},
  note          = {arXiv preprint arXiv:1902.07500},
  abstract      = {We revisit the proof by Qin et al. (2014) of bounded regret of the C2UCB contextual combinatorial bandit. We demonstrate an error in the proof of volumetric expansion of the moment matrix, used in upper bounding a function of context vector norms. We prove a relaxed inequality that yields the originally-stated regret bound.},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {unpublished}
}
We revisit the proof by Qin et al. (2014) of bounded regret of the C2UCB contextual combinatorial bandit. We demonstrate an error in the proof of volumetric expansion of the moment matrix, used in upper bounding a function of context vector norms. We prove a relaxed inequality that yields the originally-stated regret bound. |
Last updated by Romesh Malinga Perera.