-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path distributed_example.py
More file actions
38 lines (33 loc) · 1.52 KB
/
distributed_example.py
File metadata and controls
38 lines (33 loc) · 1.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
from pathlib import Path
import numpy as np
from optimizers import (AcceleratedExtraGradient, OGMG,
AcceleratedGradientDescent, GradientDescent)
from problems import RandomDistributedRidgeRegression
if __name__ == "__main__":
    # Compare accelerated first-order methods on a randomly generated
    # distributed ridge-regression problem. The objective is split as
    # r(x) = q(x) + p(x), with q mu-strongly convex / Lq-smooth and
    # p Lp-smooth, matching the sliding/extra-gradient setting.
    NUM_WORKERS: int = 10
    # Reference solution is expensive to compute, so it is cached on disk.
    # Keep the filename in one place instead of repeating the literal.
    SOLUTION_PATH = Path("x_solution.npy")

    problem = RandomDistributedRidgeRegression(3000, 3000, lmbd=0.1,
                                               gaussian_sigma=0.5,
                                               num_workers=NUM_WORKERS)

    # NOTE(review): assumes problem.x_clean is 2-D so shape[1] is the
    # ambient dimension — confirm against RandomDistributedRidgeRegression.
    X_sample = problem.x_clean
    x_init = np.zeros(X_sample.shape[1])
    print(f"Initial error: {problem.q(x_init) + problem.p(x_init)}")

    # Strong-convexity and smoothness constants of the two components.
    mu: float = 0.1
    Lq: float = 1.
    Lp: float = 10.
    # Smoothness of the full objective r = q + p (used by both baselines).
    L_total: float = Lq + Lp

    if SOLUTION_PATH.exists():
        x_best = np.load(SOLUTION_PATH)
    else:
        # Compute a high-accuracy reference minimizer with plain GD,
        # then cache it for subsequent runs.
        # ("liepshitz_const" is the optimizers API's spelling of the
        # Lipschitz constant keyword — misspelled upstream, kept as-is.)
        opt = GradientDescent(problem.r, problem.grad_r, x_init,
                              liepshitz_const=L_total, log=True)
        x_best = opt.optimize(3000)
        np.save(SOLUTION_PATH, x_best)

    # Accelerated extra-gradient method with OGM-G as the inner solver.
    opt = AcceleratedExtraGradient(problem.q, problem.grad_q,
                                   problem.p, problem.grad_p,
                                   OGMG, x_init, mu, Lq, Lp, True)
    x_opt = opt.optimize(1000)
    print(f"Final error of AEGD: {np.linalg.norm(x_best - x_opt[-1])}")
    np.save("x_AEGD.npy", np.stack(x_opt))

    # Baseline: Nesterov accelerated gradient on the full objective r.
    opt = AcceleratedGradientDescent(problem.r, problem.grad_r,
                                     x_init, L_total, mu, True)
    x_opt = opt.optimize(1000)
    print(f"Final error of AGD: {np.linalg.norm(x_best - x_opt[-1])}")
    np.save("x_AGD.npy", np.stack(x_opt))