GA features update - Having multi-objective optimization (NSGA-II) #2326
base: devel
Changes from 92 commits
@@ -150,6 +150,7 @@ def __init__(self):
     self._paramSelectionOptions = {'ftol':1e-10, 'maxiter':200, 'disp':False} # Optimizer options for hyperparameter selection
     self._externalParamOptimizer = 'fmin_l_bfgs_b' # Optimizer for external hyperparameter selection
     self._resetModel = False # Reset regression model if True
+    self._canHandleMultiObjective = False # boolean indicator of whether the optimization problem is single-objective or multi-objective

   def handleInput(self, paramInput):
     """

Reviewer comment: This line can be moved to the base class with a default of False and set to True in GA. The same would also apply to the GradientDescent and SimulatedAnnealing classes.

Reply: Regardless of whether the base class declares this with a default of False, the other algorithms will eventually need this line, set to True if they support multi-objective optimization. Also, I think users will be less confused if we place self._canHandleMultiObjective = True (or False) explicitly in every optimizer class.
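As a rough illustration of the reviewer's suggestion, here is a minimal sketch of declaring the capability flag in a base class and overriding it per subclass. The class names are hypothetical, not RAVEN's actual hierarchy:

```python
# Sketch only: declare the capability flag once in the optimizer base
# class with a safe default, then override it in subclasses that support
# multi-objective optimization. Class names here are illustrative.

class Optimizer:
  def __init__(self):
    # Default: assume single-objective unless a subclass says otherwise
    self._canHandleMultiObjective = False

class GeneticAlgorithm(Optimizer):
  def __init__(self):
    super().__init__()
    # NSGA-II support makes this optimizer multi-objective capable
    self._canHandleMultiObjective = True

class GradientDescent(Optimizer):
  def __init__(self):
    super().__init__()
    # Single-objective; restating the flag explicitly, per the reply above
    self._canHandleMultiObjective = False
```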
@@ -232,8 +233,8 @@ def initialize(self, externalSeeding=None, solutionExport=None):
     elif len(self._model.supervisedContainer[0].target) != 1:
       self.raiseAnError(RuntimeError, f'Only one target allowed when using GPR ROM for Bayesian Optimizer! '
                         f'Received {len(self._model.supervisedContainer[0].target)}')
-    elif self._objectiveVar not in self._model.supervisedContainer[0].target:
-      self.raiseAnError(RuntimeError, f'GPR ROM <target> should be objective variable: {self._objectiveVar}, '
+    elif self._objectiveVar[0] not in self._model.supervisedContainer[0].target:
+      self.raiseAnError(RuntimeError, f'GPR ROM <target> should be objective variable: {self._objectiveVar[0]}, '
                         f'Received {self._model.supervisedContainer[0].target}')

     if self._resetModel:
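The repeated change from self._objectiveVar to self._objectiveVar[0] throughout this file reflects that the objective variable is now stored as a list of names to accommodate multi-objective optimization, so code paths that remain single-objective (such as the Bayesian optimizer) index element 0. A minimal sketch of the pattern, with illustrative variable names rather than RAVEN's actual data structures:

```python
# Sketch only: the objective is now a list of names, not a single string.
objectiveVar = ['ans']                   # previously: objectiveVar = 'ans'
rlz = {'x': 0.1, 'y': 0.2, 'ans': 3.4}   # one realization from the model

# Single-objective consumers pick out the one allowed objective...
optVal = rlz[objectiveVar[0]]            # was: rlz[objectiveVar]

# ...while multi-objective algorithms such as NSGA-II can loop over all names.
for obj in objectiveVar:
  print(f'objective {obj} = {rlz[obj]}')
```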
@@ -265,8 +266,8 @@ def initialize(self, externalSeeding=None, solutionExport=None):
       trainingData = self.normalizeData(trainingData)
       for varName in self.toBeSampled.keys():
         self._trainingInputs[0][varName] = list(trainingData[varName])
-      self._trainingTargets.append(list(trainingData[self._objectiveVar]))
-      self.raiseAMessage(f"{self._model.name} ROM has been already trained with {len(trainingData[self._objectiveVar])} samples!",
+      self._trainingTargets.append(list(trainingData[self._objectiveVar[0]]))
+      self.raiseAMessage(f"{self._model.name} ROM has been already trained with {len(trainingData[self._objectiveVar[0]])} samples!",
                          "This pre-trained ROM will be used by Optimizer to evaluate the next best point!")
       # retrieving the best solution is based on the acquisition function's utility
       # Constraints are considered in the following method.
@@ -333,7 +334,7 @@ def _useRealization(self, info, rlz):
       # Add new inputs and model evaluations to the dataset
       for varName in list(self.toBeSampled):
         self._trainingInputs[traj][varName].extend(getattr(rlz, varName).values)
-      self._trainingTargets[traj].extend(getattr(rlz, self._objectiveVar).values)
+      self._trainingTargets[traj].extend(getattr(rlz, self._objectiveVar[0]).values)
       # Generate posterior with training data
       self._generatePredictiveModel(traj)
       self._resolveMultiSample(traj, rlz, info)
@@ -343,10 +344,10 @@ def _useRealization(self, info, rlz):
       # Add new input and model evaluation to the dataset
       for varName in list(self.toBeSampled):
         self._trainingInputs[traj][varName].append(rlz[varName])
-      self._trainingTargets[traj].append(rlz[self._objectiveVar])
+      self._trainingTargets[traj].append(rlz[self._objectiveVar[0]])
       # Generate posterior with training data
       self._generatePredictiveModel(traj)
-      optVal = rlz[self._objectiveVar]
+      optVal = rlz[self._objectiveVar[0]]
       self._resolveNewOptPoint(traj, rlz, optVal, info)

     # Use acquisition to select next point
@@ -555,7 +556,7 @@ def _trainRegressionModel(self, traj):

     for varName in list(self.toBeSampled):
       trainingSet[varName] = np.asarray(self._trainingInputs[traj][varName])
-    trainingSet[self._objectiveVar] = np.asarray(self._trainingTargets[traj])
+    trainingSet[self._objectiveVar[0]] = np.asarray(self._trainingTargets[traj])
     self._model.train(trainingSet)
     # NOTE It would be preferable to use targetEvaluation;
     # however, there does not appear to be a built-in normalization method, and as
@@ -596,8 +597,8 @@ def _evaluateRegressionModel(self, featurePoint):
     # Evaluating the regression model
     resultsDict = self._model.evaluate(featurePoint)
     # NOTE only allowing single targets; needs to be fixed when multi-objective optimization is added
-    mu = resultsDict[self._objectiveVar]
-    std = resultsDict[self._objectiveVar+'_std']
+    mu = resultsDict[self._objectiveVar[0]]
+    std = resultsDict[self._objectiveVar[0]+'_std']
     return mu, std

   # * * * * * * * * * * * *
@@ -627,7 +628,7 @@ def _resolveMultiSample(self, traj, rlz, info):
     for index in range(info['batchSize']):
       for varName in rlzVars:
         singleRlz[varName] = getattr(rlz, varName)[index].values
-      optVal = singleRlz[self._objectiveVar]
+      optVal = singleRlz[self._objectiveVar[0]]
       self._resolveNewOptPoint(traj, singleRlz, optVal, info)
       singleRlz = {} # FIXME is this necessary?
     self.raiseADebug(f'Multi-sample resolution completed')
@@ -664,7 +665,7 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info):
       currentPoint = {}
       for decisionVarName in list(self.toBeSampled):
         currentPoint[decisionVarName] = rlz[decisionVarName]
-      rlz[self._objectiveVar] = self._evaluateRegressionModel(currentPoint)[0][0]
+      rlz[self._objectiveVar[0]] = self._evaluateRegressionModel(currentPoint)[0][0]
     self.raiseADebug('*' * 80)
     if acceptable in ['accepted', 'first']:
       # record history
@@ -675,13 +676,13 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info):
         # If the last recommended solution point is the same, update the expected function value
         if all(old[var] == xStar[var] for var in list(self.toBeSampled)):
           newEstimate = copy.copy(old)
-          newEstimate[self._objectiveVar] = muStar
+          newEstimate[self._objectiveVar[0]] = muStar
           self._optPointHistory[traj].append((newEstimate, info))
         else:
           newRealization = copy.copy(old)
           for var in list(self.toBeSampled):
             newRealization[var] = xStar[var]
-          newRealization[self._objectiveVar] = muStar
+          newRealization[self._objectiveVar[0]] = muStar
     else:
       self.raiseAnError(f'Unrecognized acceptability: "{acceptable}"')
Reviewer comment: It seems there is some divergence between this branch and devel. The changes are already in the devel branch but still show up in this pull request. @Jimmy-INL could you try to rebase your branch onto devel?

Reply: This is Junyung's branch; I tried to fix the failing tests. I will check who will address it, but thanks a lot for the comments.

Reply: I think we actually copied this from devel.
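For context on the multi-objective capability named in the PR title, here is a self-contained sketch of the Pareto-dominance test that underlies NSGA-II-style selection. This is illustrative only, not code from this PR, and minimization of all objectives is assumed:

```python
# Sketch only: a point dominates another if it is no worse in every
# objective and strictly better in at least one (minimization assumed).

def dominates(a, b):
  """Return True if objective vector a Pareto-dominates b."""
  return all(x <= y for x, y in zip(a, b)) and any(x < y for x, y in zip(a, b))

def nonDominatedFront(points):
  """Return the subset of points not dominated by any other point."""
  return [p for p in points if not any(dominates(q, p) for q in points if q != p)]

if __name__ == '__main__':
  # Two objectives to minimize: (1, 5) and (3, 2) trade off; (4, 6) is dominated
  population = [(1, 5), (3, 2), (4, 6), (2, 4)]
  print(nonDominatedFront(population))  # [(1, 5), (3, 2), (2, 4)]
```

NSGA-II repeatedly extracts such fronts to rank a population, which is why optimizers that support it need list-valued objectives rather than a single objective name.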