Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 2 additions & 4 deletions examples/3_multi_objective/1_schaffer.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,15 +78,13 @@ def plot_from_smac(smac: AbstractFacade) -> None:
deterministic=True, # Only one seed
n_trials=150,
objectives=["metric1", "metric2"],
objective_weights=[1, 2] # Weight metric2 twice as much as metric1
)

smac = HPOFacade(
scenario=scenario,
target_function=target_function,
multi_objective_algorithm=HPOFacade.get_multi_objective_algorithm(
scenario,
objective_weights=[1, 2], # Weight metric2 twice as much as metric1
),
multi_objective_algorithm=HPOFacade.get_multi_objective_algorithm(scenario),
overwrite=True,
)
incumbents = smac.optimize()
Expand Down
10 changes: 1 addition & 9 deletions smac/facade/algorithm_configuration_facade.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,22 +150,14 @@ def get_random_design( # type: ignore
@staticmethod
def get_multi_objective_algorithm( # type: ignore
scenario: Scenario,
*,
objective_weights: list[float] | None = None,
) -> MeanAggregationStrategy:
"""Returns the mean aggregation strategy for the multi objective algorithm.

Parameters
----------
scenario : Scenario
objective_weights : list[float] | None, defaults to None
Weights for averaging the objectives in a weighted manner. Must be of the same length as the number of
objectives.
"""
return MeanAggregationStrategy(
scenario=scenario,
objective_weights=objective_weights,
)
return MeanAggregationStrategy(scenario=scenario)

@staticmethod
def get_runhistory_encoder(scenario: Scenario) -> RunHistoryEncoder:
Expand Down
12 changes: 2 additions & 10 deletions smac/facade/blackbox_facade.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,22 +289,14 @@ def get_random_design( # type: ignore
@staticmethod
def get_multi_objective_algorithm( # type: ignore
scenario: Scenario,
*,
objective_weights: list[float] | None = None,
) -> MeanAggregationStrategy:
"""Returns the mean aggregation strategy for the multi-objective algorithm.
"""Returns the mean aggregation strategy for the multi objective algorithm.

Parameters
----------
scenario : Scenario
objective_weights : list[float] | None, defaults to None
Weights for averaging the objectives in a weighted manner. Must be of the same length as the number of
objectives.
"""
return MeanAggregationStrategy(
scenario=scenario,
objective_weights=objective_weights,
)
return MeanAggregationStrategy(scenario=scenario)

@staticmethod
def get_runhistory_encoder(
Expand Down
12 changes: 2 additions & 10 deletions smac/facade/hyperparameter_optimization_facade.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,22 +183,14 @@ def get_random_design( # type: ignore
@staticmethod
def get_multi_objective_algorithm( # type: ignore
scenario: Scenario,
*,
objective_weights: list[float] | None = None,
) -> MeanAggregationStrategy:
"""Returns the mean aggregation strategy for the multi-objective algorithm.
"""Returns the mean aggregation strategy for the multi objective algorithm.

Parameters
----------
scenario : Scenario
objective_weights : list[float] | None, defaults to None
Weights for averaging the objectives in a weighted manner. Must be of the same length as the number of
objectives.
"""
return MeanAggregationStrategy(
scenario=scenario,
objective_weights=objective_weights,
)
return MeanAggregationStrategy(scenario=scenario)

@staticmethod
def get_runhistory_encoder( # type: ignore
Expand Down
12 changes: 2 additions & 10 deletions smac/facade/random_facade.py
Original file line number Diff line number Diff line change
Expand Up @@ -142,22 +142,14 @@ def get_acquisition_maximizer(scenario: Scenario) -> RandomSearch:
@staticmethod
def get_multi_objective_algorithm( # type: ignore
scenario: Scenario,
*,
objective_weights: list[float] | None = None,
) -> MeanAggregationStrategy:
"""Returns the mean aggregation strategy for the multi-objective algorithm.
"""Returns the mean aggregation strategy for the multi objective algorithm.

Parameters
----------
scenario : Scenario
objective_weights : list[float] | None, defaults to None
Weights for averaging the objectives in a weighted manner. Must be of the same length as the number of
objectives.
"""
return MeanAggregationStrategy(
scenario=scenario,
objective_weights=objective_weights,
)
return MeanAggregationStrategy(scenario=scenario)

@staticmethod
def get_runhistory_encoder(scenario: Scenario) -> RunHistoryEncoder:
Expand Down
10 changes: 3 additions & 7 deletions smac/multi_objective/aggregation_strategy.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,25 +12,21 @@

class MeanAggregationStrategy(AbstractMultiObjectiveAlgorithm):
"""A class to mean-aggregate multi-objective costs to a single cost.
If `objective_weights` are provided via the scenario, each objective is weighted
accordingly when computing the mean; otherwise, all objectives are treated equally.

Parameters
----------
scenario : Scenario
objective_weights : list[float] | None, defaults to None
Weights for a weighted average. Must be of the same length as the number of objectives.
"""

def __init__(
self,
scenario: Scenario,
objective_weights: list[float] | None = None,
):
super(MeanAggregationStrategy, self).__init__()

if objective_weights is not None and scenario.count_objectives() != len(objective_weights):
raise ValueError("Number of objectives and number of weights must be equal.")

self._objective_weights = objective_weights
self._objective_weights = scenario.objective_weights

@property
def meta(self) -> dict[str, Any]:
Expand Down
36 changes: 29 additions & 7 deletions smac/multi_objective/parego.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,21 +11,29 @@


class ParEGO(AbstractMultiObjectiveAlgorithm):
"""ParEGO implementation based on https://ieeexplore.ieee.org/abstract/document/1583627.
"""
ParEGO implementation based on https://ieeexplore.ieee.org/abstract/document/1583627.
If `objective_weights` are provided via the scenario, scalarization weights
are sampled from a Dirichlet distribution centered around these weights.

Parameters
----------
scenario : Scenario
rho : float, defaults to 0.05
A small positive value.
seed : int | None, defaults to None
concentration_scale : float, defaults to 10.0
Scaling factor for the Dirichlet distribution when `objective_weights` are provided:
- Low values -> more exploration (weights vary strongly)
- High values -> stronger focus on the scenario-provided objective_weights
"""

def __init__(
self,
scenario: Scenario,
rho: float = 0.05,
seed: int | None = None,
concentration_scale: float = 10.0,
):
super(ParEGO, self).__init__()

Expand All @@ -35,6 +43,12 @@ def __init__(
self._n_objectives = scenario.count_objectives()
self._seed = seed
self._rng = np.random.RandomState(seed)
self.concentration_scale = concentration_scale

self._objective_weights = None
if scenario.objective_weights is not None:
w = np.asarray(scenario.objective_weights, dtype=float)
self._objective_weights = w / np.sum(w)

self._rho = rho
# Will be set on starting an SMBO iteration
Expand All @@ -48,21 +62,29 @@ def meta(self) -> dict[str, Any]: # noqa: D102
"name": self.__class__.__name__,
"rho": self._rho,
"seed": self._seed,
"objective_weights": self._objective_weights,
"concentration_scale": self.concentration_scale,
}
)

return meta

def update_on_iteration_start(self) -> None: # noqa: D102
self._theta = self._rng.rand(self._n_objectives)

# Normalize so that all theta values sum up to 1
self._theta = self._theta / (np.sum(self._theta) + 1e-10)
def update_on_iteration_start(self) -> None:
"""Sample new scalarization weights for the current iteration."""
if self._objective_weights is None:
# Sample uniformly and normalize to simplex
self._theta = self._rng.rand(self._n_objectives)
self._theta = self._theta / (np.sum(self._theta) + 1e-10)
else:
# Dirichlet sampling around user preference vector
w = self._objective_weights
alpha = self.concentration_scale * w
self._theta = self._rng.dirichlet(alpha)

def __call__(self, values: list[float]) -> float: # noqa: D102
# Weight the values
if self._theta is None:
raise ValueError("Iteration not yet initalized; Call `update_on_iteration_start()` first")
raise ValueError("Iteration not yet initialized; Call `update_on_iteration_start()` first")

theta_f = self._theta * values
return float(np.max(theta_f, axis=0) + self._rho * np.sum(theta_f, axis=0))
15 changes: 15 additions & 0 deletions smac/scenario.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,11 @@ class Scenario:
and optionally model noise in the Gaussian Process surrogate.
objectives : str | list[str] | None, defaults to "cost"
The objective(s) to optimize. This argument is required for multi-objective optimization.
objective_weights : list[float] | None, defaults to None
Optional preference weights for multi-objective optimization.
They indicate the relative importance of each objective when aggregating them
(e.g., using MeanAggregationStrategy or ParEGO). Weights must be non-negative
and have the same length as the number of objectives in the scenario.
crash_cost : float | list[float], defaults to np.inf
Defines the cost for a failed trial. In case of multi-objective, each objective can be associated with
a different cost.
Expand Down Expand Up @@ -98,6 +103,7 @@ class Scenario:

# Objectives
objectives: str | list[str] = "cost"
objective_weights: list[float] | None = None
crash_cost: float | list[float] = np.inf
termination_cost_threshold: float | list[float] = np.inf

Expand Down Expand Up @@ -145,6 +151,15 @@ def __post_init__(self) -> None:
if self.adaptive_capping_slackfactor is not None and self.runtime_cutoff is None:
raise ValueError("If adaptive_capping_slackfactor is set, then runtime_cutoff must be set as well.")

if self.objective_weights is not None:
n_objectives = self.count_objectives()
# Ensure one weight per objective
if len(self.objective_weights) != n_objectives:
raise ValueError("objective_weights must have the same length as objectives")
# Ensure weights are non-negative
if any(w < 0 for w in self.objective_weights):
raise ValueError("objective_weights must be non-negative")

# Change directory wrt name and seed
self._change_output_directory()

Expand Down
4 changes: 3 additions & 1 deletion tests/fixtures/scenario.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from typing import Callable
from typing import Callable, List, Optional

import pytest
from ConfigSpace import ConfigurationSpace
Expand All @@ -21,6 +21,7 @@ def _make(
n_workers: int = 1,
n_trials: int = 100,
use_default_config: bool = False,
objective_weights: Optional[List[float]] = None,
) -> Scenario:
objectives = "cost"
if use_multi_objective:
Expand All @@ -43,6 +44,7 @@ def _make(
name="test",
output_directory="smac3_output_test",
objectives=objectives,
objective_weights=objective_weights,
deterministic=deterministic,
walltime_limit=30,
n_trials=n_trials,
Expand Down
4 changes: 3 additions & 1 deletion tests/test_multi_objective/test_combined_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
from smac.multi_objective.aggregation_strategy import MeanAggregationStrategy
from smac.multi_objective.parego import ParEGO
from smac.scenario import Scenario
from dataclasses import replace

FACADES = [BBFacade, HPOFacade, MFFacade, RFacade, HBFacade, ACFacade]

Expand Down Expand Up @@ -77,7 +78,8 @@ def test_mean_aggregation(facade, make_scenario, configspace):
# TODO: Check whether different weighting affects the sampled configurations.
weights = [[0.1,0.9], [0.5,0.5], [0.8,0.2], [1.0,0.0], [0.0,1.0], None]
for weight_pair in weights:
multi_objective_algorithm = WrapStrategy(MeanAggregationStrategy, objective_weights=weight_pair, scenario=scenario)
scenario = replace(scenario, objective_weights=weight_pair)
multi_objective_algorithm = WrapStrategy(MeanAggregationStrategy, scenario=scenario)
intensifier = Intensifier(scenario, max_config_calls=1, max_incumbents=10)
config_selector = ConfigSelector(scenario, retrain_after=RETRAIN_AFTER)
initial_design = RandomInitialDesign(scenario, n_configs=1)
Expand Down
5 changes: 4 additions & 1 deletion tests/test_runhistory/test_runhistory_multi_objective.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from smac.multi_objective.aggregation_strategy import MeanAggregationStrategy
from smac.runner.abstract_runner import StatusType
from smac.scenario import Scenario
from dataclasses import replace

__copyright__ = "Copyright 2025, Leibniz University Hanover, Institute of AI"
__license__ = "3-clause BSD"
Expand Down Expand Up @@ -590,6 +591,8 @@ def test_objective_weights(scenario, runhistory, config1, config2):
# Average cost returns us 0.5
assert runhistory.get_cost(config1) == 0.5

scenario = replace(scenario, objective_weights=[1, 2])

# If we change the weights/mo algorithm now, we expect a higher value in the second cost
runhistory.multi_objective_algorithm = MeanAggregationStrategy(scenario, objective_weights=[1, 2])
runhistory.multi_objective_algorithm = MeanAggregationStrategy(scenario)
assert round(runhistory.get_cost(config1), 2) == 0.67
Loading