Hi @YouHyin. Looks like you have quite poor model fit here. Since the range of your training observations (…)
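A common remedy in this situation is to standardize the outcomes before fitting, so that the GP's default hyperparameter priors see roughly zero-mean, unit-variance targets. A minimal sketch using BoTorch's Standardize outcome transform (train_x and train_obj are assumed to be the tensors defined in the question below):

from botorch.fit import fit_gpytorch_model
from botorch.models import SingleTaskGP
from botorch.models.transforms import Standardize
from gpytorch.mlls import ExactMarginalLogLikelihood

# sketch: Standardize maps the outcomes to zero mean / unit variance before
# fitting; train_x / train_obj are assumed to be the tensors from the question
gp = SingleTaskGP(train_x, train_obj, outcome_transform=Standardize(m=1))
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
fit_gpytorch_model(mll)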
I want to visualize the Bayesian optimization process, so I drew a plot with the following code:
import pybamm
import torch
import numpy as np
from tqdm import tqdm
from botorch.fit import fit_gpytorch_model
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.optim import optimize_acqf
from botorch.acquisition import ExpectedImprovement, UpperConfidenceBound
import matplotlib.pyplot as plt
def fortemplimit(x, k, a):
    model = pybamm.lithium_ion.DFN({"thermal": "lumped"})
    parameter_values = model.default_parameter_values
    experiment = pybamm.Experiment(
        [
            (
                "Charge at " + str(x) + " C for " + str(a) + " seconds",
                "Rest for " + str(k) + " seconds",
                "Hold at 4.1 V until 50 mA",
            )
        ],
        termination="80% capacity",
    )
    sim = pybamm.Simulation(
        model,
        experiment=experiment,
        solver=pybamm.CasadiSolver("fast with events"),
        parameter_values=parameter_values,
    )
    sim.solve()
    solution = sim.solution
    # negative total time, so that maximizing the objective minimizes time
    return (-1) * solution["Time [s]"].entries[-1]
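A hypothetical call, for reference (fortemplimit is not used elsewhere in this snippet):

print(fortemplimit(2.0, 600, 300))  # charge at 2 C for 300 s, rest 600 s, hold at 4.1 V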
def experiment_func(x):
    model = pybamm.lithium_ion.DFN({"thermal": "lumped"})
    parameter_values = model.default_parameter_values
    # fast_solver = pybamm.CasadiSolver(atol=1e-3, rtol=1e-3, mode="fast")
    vmax = 4.1
    experiment = pybamm.Experiment(
        [
            (
                "Charge at " + str(x) + " C until " + str(vmax) + " V",
                "Hold at " + str(vmax) + " V until 50 mA",
            )
        ]
    )
    # ending reconstructed (the original post is truncated here), mirroring
    # fortemplimit: simulate the protocol and return the negative charge time
    sim = pybamm.Simulation(model, experiment=experiment,
                            solver=pybamm.CasadiSolver("fast with events"),
                            parameter_values=parameter_values)
    sim.solve()
    return (-1) * sim.solution["Time [s]"].entries[-1]

# print(experiment_func(2.7))
x1 = np.linspace(1, 3, 4)   # 4 initial training points (C-rates)
x2 = np.linspace(1, 3, 30)  # dense grid for plotting the posterior
real_x = x1.reshape(-1, 1)
real_x2 = x2.reshape(-1, 1)
train_x = torch.from_numpy(real_x)
train_x2 = torch.from_numpy(real_x2)
train_x = train_x.view([-1, 1])
train_x2 = train_x2.view([-1, 1])
x = train_x2
print(x.shape)
# print(train_x)
# print(train_x.dim())

Y = []
for i in x1:
    Y.append(experiment_func(i))
real_Y = np.array(Y).reshape(-1, 1)
train_obj = torch.from_numpy(real_Y)
train_obj = train_obj.view([-1, 1])

Y2 = []
for k in x2:
    Y2.append(experiment_func(k))
real_Y2 = np.array(Y2).reshape(-1, 1)
train_y2 = torch.from_numpy(real_Y2)
train_y2 = train_y2.view([-1, 1])
y = train_y2
print(y.shape)
print(y)
# build the GP model:
# initialize a GP with data, then
# use the likelihood to find the GP hyperparameters
def get_model(train_x, train_y, state_dict=None, debug=False):
    gp = SingleTaskGP(train_x, train_y)
    if debug:
        print("Prior hyperparams lengthscale & noise: {}, {}".format(
            gp.covar_module.base_kernel.lengthscale.item(), gp.likelihood.noise.item()))
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    if state_dict is not None:
        gp.load_state_dict(state_dict)  # speeds up fit
    fit_gpytorch_model(mll)  # performs the hyperparam fit
    if debug:
        print("Post hyperparams lengthscale & noise: {}, {}".format(
            gp.covar_module.base_kernel.lengthscale.item(), gp.likelihood.noise.item()))
    return gp, mll
model, mll = get_model(train_x, train_obj, debug=True)

train_x_explore = torch.clone(train_x)
train_obj_explore = torch.clone(train_obj)
model_explore, mll_explore = get_model(train_x_explore, train_obj_explore, debug=True)
def get_max(model, beta):
    UCB = UpperConfidenceBound(model=model, beta=beta)
    new_point_analytic, acq_value_list = optimize_acqf(
        acq_function=UCB,
        bounds=torch.tensor([[1.], [3.]], dtype=torch.double),  # match training dtype
        q=1,
        num_restarts=50,
        raw_samples=10000,
        options={},
        return_best_only=True,
        sequential=False,
    )
    return new_point_analytic, acq_value_list
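A hypothetical call, since get_max is defined but never used below:

new_point, acq_value = get_max(model, beta=5.)
print(new_point.item(), acq_value.item())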
def step(model, mll, train_x, train_obj, beta=5.):
    # optimize acquisition function
    UCB = UpperConfidenceBound(model=model, beta=beta)
    new_point_analytic, acq_value_list = optimize_acqf(
        acq_function=UCB,
        bounds=torch.tensor([[1.], [3.]], dtype=torch.double),  # match training dtype
        q=1,
        num_restarts=50,
        raw_samples=10000,
        options={},
        return_best_only=True,
        sequential=False,
    )
    print(new_point_analytic.item())
    smth_1 = experiment_func(new_point_analytic.item())
    # ending reconstructed (the original post is truncated here): append the new
    # observation, refit, and return the values the calls below unpack
    train_x = torch.cat([train_x, new_point_analytic.to(train_x)])
    train_obj = torch.cat([train_obj, torch.tensor([[smth_1]], dtype=train_obj.dtype)])
    model, mll = get_model(train_x, train_obj, state_dict=model.state_dict())
    return model, mll, train_x, train_obj
def plot():
    with torch.no_grad():
        mean_preds = model(x).mean
        std_preds = model(x).stddev
        lower, upper = model(x).confidence_region()
        mean_preds_explore = model_explore(x).mean
        lower_explore, upper_explore = model_explore(x).confidence_region()
        std_preds_explore = model_explore(x).stddev
    # plotting reconstructed below; the original post is truncated here
    plt.plot(x.squeeze(), mean_preds.squeeze(), label="posterior mean")
    plt.fill_between(x.squeeze(), lower.squeeze(), upper.squeeze(), alpha=0.3)
    plt.scatter(train_x.squeeze(), train_obj.squeeze(), marker="x", color="k", label="observations")
    plt.legend()
    plt.show()
plot()
model, mll, train_x, train_obj = step(model, mll, train_x, train_obj, beta=1.)
model_explore, mll_explore, train_x_explore, train_obj_explore = step(model_explore, mll_explore, train_x_explore, train_obj_explore, beta=25.)
plot()
model, mll, train_x, train_obj = step(model, mll, train_x, train_obj, beta=1.)
model_explore, mll_explore, train_x_explore, train_obj_explore = step(model_explore, mll_explore, train_x_explore, train_obj_explore, beta=25.)
plot()
model, mll, train_x, train_obj = step(model, mll, train_x, train_obj, beta=1.)
model_explore, mll_explore, train_x_explore, train_obj_explore = step(model_explore, mll_explore, train_x_explore, train_obj_explore, beta=25.)
plot()
The results are shown in Figure 1.
[Figure 1]
As you can see from this figure, the Gaussian process plot is wrong: the posterior mean and uncertainty do not align with the observed points, but instead seem to follow the acquisition function.
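To make the mismatch concrete, a check like the following (a minimal sketch; model.posterior conditions the GP on its training data) compares the posterior mean at the training inputs with the observed values:

# sketch: with a good fit, the posterior mean at the training inputs should
# closely track the observed objective values
with torch.no_grad():
    post = model.posterior(train_x)
    for xi, yi, mi in zip(train_x.squeeze(-1), train_obj.squeeze(-1),
                          post.mean.squeeze(-1)):
        print(f"x={xi:.2f}  observed={yi:.1f}  posterior mean={mi:.1f}")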
I wanted to draw something like Figure 2. I would appreciate any help with this.