From 19626e4318e7202f5abb3952e8b8261f215a8080 Mon Sep 17 00:00:00 2001
From: Jae-Won Chung
Date: Wed, 11 Feb 2026 09:01:56 -0500
Subject: [PATCH] Add Systems + AI topic to GPU-to-Grid

---
 source/_data/SymbioticLab.bib | 1 +
 1 file changed, 1 insertion(+)

diff --git a/source/_data/SymbioticLab.bib b/source/_data/SymbioticLab.bib
index 8b67a66e..640ec01f 100644
--- a/source/_data/SymbioticLab.bib
+++ b/source/_data/SymbioticLab.bib
@@ -2308,6 +2308,7 @@ @Article{gputogrid:arxiv26
   publist_confkey = {arXiv:2602.05116},
   publist_link = {paper || https://arxiv.org/abs/2602.05116},
   publist_topic = {Energy-Efficient Systems},
+  publist_topic = {Systems + AI},
   publist_abstract = {
     While the rapid expansion of data centers poses challenges for power grids, it also offers new opportunities as potentially flexible loads. Existing power system research often abstracts data centers as aggregate resources, while computer system research primarily focuses on optimizing GPU energy efficiency and largely ignores the grid impacts of optimized GPU power consumption. To bridge this gap, we develop a GPU-to-Grid framework that couples device-level GPU control with power system objectives. We study distribution-level voltage regulation enabled by flexibility in LLM inference, using batch size as a control knob that trades off the voltage impacts of GPU power consumption against inference latency and token throughput. We first formulate this problem as an optimization problem and then realize it as an online feedback optimization controller that leverages measurements from both the power grid and GPU systems. Our key insight is that reducing GPU power consumption alleviates violations of lower voltage limits, while increasing GPU power mitigates violations near upper voltage limits in distribution systems; this runs counter to the common belief that minimizing GPU power consumption is always beneficial to power grids.
   }
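
Note: the abstract in this entry describes an online feedback optimization controller that adjusts LLM inference batch size from live grid measurements. The Python sketch below only illustrates that feedback idea and its sign convention (undervoltage shrinks the batch to shed GPU power, overvoltage grows it to absorb power, projected onto a latency-bounded range); it is not the paper's controller, and every name, gain, and limit here (batch_size_step, v_lower, v_upper, gain, max_batch) is a hypothetical placeholder, not taken from the work.

# Minimal, hypothetical sketch of the feedback idea described in the
# abstract above: adjust the LLM inference batch size from a bus-voltage
# measurement so GPU power pushes the voltage back inside its limits.
# All names, gains, and limits are illustrative assumptions.

def batch_size_step(
    batch_size: int,
    measured_voltage_pu: float,  # bus voltage measurement, in per unit
    v_lower: float = 0.95,       # assumed lower voltage limit (p.u.)
    v_upper: float = 1.05,       # assumed upper voltage limit (p.u.)
    gain: float = 64.0,          # batch-size change per p.u. of violation
    min_batch: int = 1,
    max_batch: int = 128,        # latency/throughput bound on batch size
) -> int:
    """One projected feedback update on the batch size."""
    if measured_voltage_pu < v_lower:
        # Undervoltage: shed GPU load by shrinking the batch.
        delta = -gain * (v_lower - measured_voltage_pu)
    elif measured_voltage_pu > v_upper:
        # Overvoltage: absorb power by growing the batch.
        delta = gain * (measured_voltage_pu - v_upper)
    else:
        delta = 0.0
    # Project back onto the feasible batch-size range.
    return max(min_batch, min(max_batch, round(batch_size + delta)))

if __name__ == "__main__":
    bs = 32
    for v in (0.93, 0.97, 1.07):  # example voltage measurements (p.u.)
        bs = batch_size_step(bs, v)
        print(f"measured V = {v:.2f} p.u. -> next batch size = {bs}")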