
trivial Bayesian Optimization raises TypeError #117

Open

Permafacture opened this issue Aug 15, 2022 · 0 comments


I don't know if this is a sherpa issue or a GPyOpt issue, but since sherpa calls GPyOpt and determines which versions of its dependencies get installed, I think this is the place to start.

This minimal example raises an error:

import sherpa
import sherpa.algorithms

search_params = [sherpa.Continuous('lr', [0.001, 0.1], 'log')]
algorithm = sherpa.algorithms.GPyOpt(max_num_trials=50)
study = sherpa.Study(parameters=search_params,
                     algorithm=algorithm,
                     lower_is_better=True,
                     disable_dashboard=True)
for i, trial in enumerate(study):  # enumerate supplies the iteration counter `i`
    loss = trial.parameters['lr']
    study.add_observation(trial=trial, iteration=i, objective=loss)
    study.finalize(trial=trial)
    study.save('.')

which gives this output:

INFO:GP:initializing Y
INFO:GP:initializing inference method
INFO:GP:adding kernel and likelihood as parameters
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Input In [205], in <cell line: 14>()
      8 algorithm2 = sherpa.algorithms.GPyOpt(max_num_trials=50)
      9 study2 = sherpa.Study(parameters=search_params,
     10                       algorithm=algorithm2,
     11                       lower_is_better=True,
     12                       disable_dashboard=True)
---> 14 for trial2 in study2:
     15     loss = trial2.parameters['lr']
     16     study2.add_observation(trial=trial2, iteration=i, objective=loss)

File ~/code/trace2vec/.env-trace2vec/lib/python3.10/site-packages/sherpa/core.py:376, in Study.__next__(self)
    372 def __next__(self):
    373     """
    374     Allows to write `for trial in study:`.
    375     """
--> 376     t = self.get_suggestion()
    377     if isinstance(t, Trial):
    378         return t

File ~/code/trace2vec/.env-trace2vec/lib/python3.10/site-packages/sherpa/core.py:214, in Study.get_suggestion(self)
    211 if len(self._trial_queue) != 0:
    212     return self._trial_queue.popleft()
--> 214 p = self.algorithm.get_suggestion(self.parameters, self.results,
    215                                   self.lower_is_better)
    216 if isinstance(p, dict):
    217     self.num_trials += 1

File ~/code/trace2vec/.env-trace2vec/lib/python3.10/site-packages/sherpa/algorithms/bayesian_optimization.py:109, in GPyOpt.get_suggestion(self, parameters, results, lower_is_better)
    105 self.next_trials.clear()
    107 X, y, y_var = self._prepare_data_for_bayes_opt(parameters, results)
--> 109 batch = self._generate_bayesopt_batch(self.domain, X, y, y_var,
    110                                       lower_is_better)
    112 batch_list_of_dicts = self._reverse_to_sherpa_format(batch,
    113                                                      parameters)
    115 self.next_trials.extend(batch_list_of_dicts)

File ~/code/trace2vec/.env-trace2vec/lib/python3.10/site-packages/sherpa/algorithms/bayesian_optimization.py:149, in GPyOpt._generate_bayesopt_batch(self, domain, X, y, y_var, lower_is_better)
    137     kwargs = {'model_type': self.model_type}
    139 bo_step = gpyopt_package.methods.BayesianOptimization(f=None,
    140                                                       domain=domain,
    141                                                       X=X, Y=y_adjusted,
   (...)
    147                                                       exact_feval=False,
    148                                                       **kwargs)
--> 149 return bo_step.suggest_next_locations()

File ~/code/trace2vec/.env-trace2vec/lib/python3.10/site-packages/GPyOpt/core/bo.py:69, in BO.suggest_next_locations(self, context, pending_X, ignored_X)
     66 self.context = context
     67 self._update_model(self.normalization_type)
---> 69 suggested_locations = self._compute_next_evaluations(pending_zipped_X = pending_X, ignored_zipped_X = ignored_X)
     71 return suggested_locations

File ~/code/trace2vec/.env-trace2vec/lib/python3.10/site-packages/GPyOpt/core/bo.py:236, in BO._compute_next_evaluations(self, pending_zipped_X, ignored_zipped_X)
    233     duplicate_manager = None
    235 ### We zip the value in case there are categorical variables
--> 236 return self.space.zip_inputs(self.evaluator.compute_batch(duplicate_manager=duplicate_manager, context_manager= self.acquisition.optimizer.context_manager))

File ~/code/trace2vec/.env-trace2vec/lib/python3.10/site-packages/GPyOpt/core/evaluators/batch_local_penalization.py:37, in LocalPenalization.compute_batch(self, duplicate_manager, context_manager)
     33 k=1
     35 if self.batch_size >1:
     36     # ---------- Approximate the constants of the the method
---> 37     L = estimate_L(self.acquisition.model.model,self.acquisition.space.get_bounds())
     38     Min = self.acquisition.model.model.Y.min()
     40 # --- GET the remaining elements

File ~/code/trace2vec/.env-trace2vec/lib/python3.10/site-packages/GPyOpt/core/evaluators/batch_local_penalization.py:67, in estimate_L(model, bounds, storehistory)
     65 x0 = samples[np.argmin(pred_samples)]
     66 res = scipy.optimize.minimize(df,x0, method='L-BFGS-B',bounds=bounds, args = (model,x0), options = {'maxiter': 200})
---> 67 minusL = res.fun[0][0]
     68 L = -minusL
     69 if L<1e-7: L=10  ## to avoid problems in cases in which the model is flat.

TypeError: 'float' object is not subscriptable

And indeed, res.fun is a plain float here, so the res.fun[0][0] indexing in GPyOpt's estimate_L fails with the TypeError above.
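
To isolate it, here is a minimal sketch (my own toy objective standing in for GPyOpt's df, so the names are mine): on scipy 1.9, L-BFGS-B coerces the objective's return value to a scalar, so res.fun comes back as a plain float even when the callable returns a (1, 1) array the way GPyOpt's df does.

import numpy as np
import scipy.optimize

def f(x):
    # mimic GPyOpt's df, which returns a (1, 1) array rather than a scalar
    return np.array([[np.sum(x ** 2)]])

res = scipy.optimize.minimize(f, x0=np.array([1.0]), method='L-BFGS-B',
                              bounds=[(-2.0, 2.0)])
print(type(res.fun))   # <class 'float'> on scipy 1.9.0
res.fun[0][0]          # TypeError: 'float' object is not subscriptable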

Versions:
python 3.10
Sherpa 1.0.6
GPyOpt 1.2.6
scipy 1.9.0
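
If it helps, a possible fix on the GPyOpt side (just a sketch on my part, nothing upstream has confirmed) would be to make the unpacking in estimate_L tolerate both return types:

import numpy as np

def fun_as_scalar(fun_value):
    # hypothetical helper: accepts the plain float newer scipy returns
    # as well as the nested array older scipy versions produced
    return float(np.squeeze(fun_value))

# in GPyOpt's estimate_L, `minusL = res.fun[0][0]` would become:
# minusL = fun_as_scalar(res.fun)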
