diff --git a/kernel_tuner/interface.py b/kernel_tuner/interface.py
index 32e91c86..0641eb7e 100644
--- a/kernel_tuner/interface.py
+++ b/kernel_tuner/interface.py
@@ -65,6 +65,7 @@
     pyatf_strategies,
     random_sample,
     simulated_annealing,
+    skopt,
 )
 from kernel_tuner.strategies.wrapper import OptAlgWrapper
@@ -82,6 +83,7 @@
     "mls": mls,
     "pso": pso,
     "simulated_annealing": simulated_annealing,
+    "skopt": skopt,
     "firefly_algorithm": firefly_algorithm,
     "bayes_opt": bayes_opt,
     "pyatf_strategies": pyatf_strategies,
@@ -394,6 +396,7 @@ def __deepcopy__(self, _):
         * "pso" particle swarm optimization
         * "random_sample" takes a random sample of the search space
         * "simulated_annealing" simulated annealing strategy
+        * "skopt" uses the minimization methods from `skopt`

     Strategy-specific parameters and options are explained under strategy_options.
diff --git a/kernel_tuner/strategies/common.py b/kernel_tuner/strategies/common.py
index 9ffe999b..b51274ce 100644
--- a/kernel_tuner/strategies/common.py
+++ b/kernel_tuner/strategies/common.py
@@ -73,6 +73,7 @@ def __init__(
         snap=True,
         return_invalid=False,
         return_raw=None,
+        invalid_value=sys.float_info.max,
     ):
         """An abstract method to handle evaluation of configurations.
@@ -100,6 +101,7 @@
             self.return_raw = f"{tuning_options['objective']}s"
         self.results = []
         self.budget_spent_fraction = 0.0
+        self.invalid_return_value = invalid_value

     def __call__(self, x, check_restrictions=True):
@@ -168,7 +170,7 @@ def __call__(self, x, check_restrictions=True):
             else:
-                # this is not a valid configuration, replace with float max if needed
+                # this is not a valid configuration, replace with the invalid value if needed
                 if not self.return_invalid:
-                    return_value = sys.float_info.max
+                    return_value = self.invalid_return_value

         # include raw data in return if requested
         if self.return_raw is not None:
diff --git a/kernel_tuner/strategies/skopt.py b/kernel_tuner/strategies/skopt.py
new file mode 100644
index 00000000..977a47d5
--- /dev/null
+++ b/kernel_tuner/strategies/skopt.py
@@ -0,0 +1,116 @@
+"""The strategy that uses the optimizer from skopt for searching through the parameter space."""
+
+import numpy as np
+from kernel_tuner.util import StopCriterionReached
+from kernel_tuner.searchspace import Searchspace
+from kernel_tuner.strategies.common import (
+    CostFunc,
+    get_options,
+    get_strategy_docstring,
+)
+
+supported_learners = ["RF", "ET", "GBRT", "DUMMY", "GP"]
+supported_acq = ["LCB", "EI", "PI", "gp_hedge"]
+supported_liars = ["cl_min", "cl_mean", "cl_max"]
+
+_options = dict(
+    learner=(f"The learner to use (supported: {supported_learners})", "RF"),
+    acq_func=(f"The acquisition function to use (supported: {supported_acq})", "gp_hedge"),
+    lie_strategy=(f"The lie strategy to use when using batches (supported: {supported_liars})", "cl_max"),
+    kappa=("The value of kappa", 1.96),
+    num_initial=("Number of initial samples. If `None`, let skopt choose the initial population", None),
+    batch_size=("The number of points to ask per batch", 1),
+    skopt_kwargs=("Additional options passed to the skopt `Optimizer` as kwargs.", dict()),
+)
+
+
+def tune(searchspace: Searchspace, runner, tuning_options):
+    learner, acq_func, lie_strategy, kappa, num_initial, batch_size, skopt_kwargs = \
+        get_options(tuning_options.strategy_options, _options)
+
+    # Get maximum number of evaluations
+    max_fevals = min(tuning_options.get("max_fevals", np.inf), searchspace.size)
+
+    # Track the best configuration and result found so far
+    opt_config, opt_result = None, None
+
+    # The dimensions. Parameters with only one value become categorical
+    from skopt.space.space import Categorical, Integer
+    tune_params_values = list(searchspace.tune_params.values())
+    bounds = [Integer(0, len(p) - 1) if len(p) > 1 else Categorical([0]) for p in tune_params_values]
+
+    # Space constraint
+    space_constraint = lambda x: searchspace.is_param_config_valid(
+        searchspace.get_param_config_from_param_indices(x))
+
+    # Create skopt optimizer
+    skopt_kwargs = dict(skopt_kwargs)
+    skopt_kwargs["base_estimator"] = learner
+    skopt_kwargs["acq_func"] = acq_func
+
+    # Only set n_initial_points if not None
+    if num_initial is not None:
+        skopt_kwargs["n_initial_points"] = num_initial
+
+    # Only set kappa if not None
+    if kappa is not None:
+        skopt_kwargs.setdefault("acq_func_kwargs", {})["kappa"] = kappa
+
+    if tuning_options.verbose:
+        print(f"Initialize scikit-optimize Optimizer object: {skopt_kwargs}")
+
+    from skopt import Optimizer as SkOptimizer
+    optimizer = SkOptimizer(
+        dimensions=bounds,
+        space_constraint=space_constraint,
+        **skopt_kwargs
+    )
+
+    # Ask initial batch of configs
+    num_initial = optimizer._n_initial_points
+    batch = optimizer.ask(num_initial, lie_strategy)
+    xs, ys = [], []
+    eval_count = 0
+
+    if tuning_options.verbose:
+        print(f"Asked optimizer for {num_initial} points: {batch}")
+
+    # Create cost function
+    cost_func = CostFunc(searchspace, tuning_options, runner)
+    x0 = cost_func.get_start_pos()
+
+    # Add x0 if the user has requested it
+    if x0 is not None:
+        batch.insert(0, searchspace.get_param_indices(x0))
+
+    try:
+        while eval_count < max_fevals:
+            if not batch:
+                optimizer.tell(xs, ys)
+                batch = optimizer.ask(batch_size, lie_strategy)
+                xs, ys = [], []
+
+                if tuning_options.verbose:
+                    print(f"Asked optimizer for {batch_size} points: {batch}")
+
+            x = batch.pop(0)
+            y = cost_func(searchspace.get_param_config_from_param_indices(x))
+            eval_count += 1
+
+            xs.append(x)
+            ys.append(y)
+
+            if opt_result is None or y < opt_result:
+                opt_config, opt_result = x, y
+
+    except StopCriterionReached as e:
+        if tuning_options.verbose:
+            print(e)
+
+    if opt_result is not None and tuning_options.verbose:
+        print(f"Best configuration: {searchspace.get_param_config_from_param_indices(opt_config)}")
+
+    return cost_func.results
+
+
+tune.__doc__ = get_strategy_docstring("skopt minimize", _options)
diff --git a/pyproject.toml b/pyproject.toml
index ffc0583b..47040753 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -128,6 +128,7 @@
 nox-poetry = "^1.0.3"
 ruff = "^0.4.8"
 pep440 = "^0.1.2"
 tomli = "^2.0.1" # held back by Python <= 3.10, can be replaced by built-in [tomllib](https://docs.python.org/3.11/library/tomllib.html) from Python 3.11 onwards
+scikit-optimize = "0.10.2"
 # development dependencies are unused for now, as this is already covered by test and docs
 # # ATTENTION: if anything is changed here, run `poetry update`
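
For reviewers, a minimal usage sketch of the new strategy. The kernel, data sizes, and tunable values below are invented for illustration; `tune_kernel`, `strategy`, and `strategy_options` are the existing Kernel Tuner interface, and the option names (`learner`, `acq_func`, `batch_size`) come from the `_options` dict in `skopt.py` above.

```python
# Hypothetical usage sketch of the new "skopt" strategy (not part of the diff).
import numpy as np
from kernel_tuner import tune_kernel

kernel_string = """
__global__ void vector_add(float *c, const float *a, const float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
"""

size = 10_000_000
a = np.random.randn(size).astype(np.float32)
b = np.random.randn(size).astype(np.float32)
c = np.zeros_like(a)
n = np.int32(size)

tune_params = {"block_size_x": [32, 64, 128, 256, 512, 1024]}

results, env = tune_kernel(
    "vector_add", kernel_string, size, [c, a, b, n], tune_params,
    strategy="skopt",
    strategy_options={
        "learner": "GP",   # Gaussian-process surrogate instead of the default "RF"
        "acq_func": "EI",  # expected improvement instead of the default "gp_hedge"
        "batch_size": 4,   # ask four points per optimizer batch
        "max_fevals": 50,  # evaluation budget, read via tuning_options in tune()
    },
)
```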