Runner

Execute simulations across experimental grids.

trade_study.run_grid(world, scorer, grid, observables, *, annotations=None, n_jobs=1, callback=None)

Run all configurations in a grid.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `world` | `Simulator` | Simulator that generates (truth, observations). | *required* |
| `scorer` | `Scorer` | Scorer that evaluates observables. | *required* |
| `grid` | `list[dict[str, Any]]` | List of config dicts to evaluate. | *required* |
| `observables` | `list[Observable]` | Observable definitions (for column ordering). | *required* |
| `annotations` | `list[Annotation] \| None` | Optional external annotations (costs, etc.). | `None` |
| `n_jobs` | `int` | Number of parallel workers (-1 for all CPUs). | `1` |
| `callback` | `ProgressCallback \| None` | Optional progress callback invoked after each trial with `(trial_index, total_trials, trial_result)`. | `None` |

Returns:

| Type | Description |
| --- | --- |
| `ResultsTable` | ResultsTable with scored results. |
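
A minimal usage sketch follows. `ToyWorld` and `ToyScorer` are hypothetical stand-ins that implement only the `generate` and `score` calls `run_grid` relies on, and `error_observable` is assumed to be an `Observable` named `"abs_error"`; the real constructors are documented elsewhere in the package.

```python
import trade_study

# Hypothetical stand-ins: only the call signatures run_grid relies on are assumed here.
class ToyWorld:
    def generate(self, config):
        truth = 10.0 * config["gain"]
        observations = truth + 0.1  # pretend a slightly noisy measurement
        return truth, observations

class ToyScorer:
    def score(self, truth, observations, config):
        return {"abs_error": abs(observations - truth)}

# The grid is plain data: one config dict per trial.
grid = [{"gain": g} for g in (1.0, 2.0, 4.0)]

def on_progress(trial_index, total_trials, trial_result):
    # Invoked after each trial with the documented (index, total, result) triple.
    print(f"trial {trial_index + 1}/{total_trials} done")

# error_observable: assumed Observable named "abs_error" (constructor not shown on this page).
table = trade_study.run_grid(
    ToyWorld(),
    ToyScorer(),
    grid,
    [error_observable],
    n_jobs=1,
    callback=on_progress,
)
```

When `n_jobs` is not 1, the trials are dispatched through joblib; as the source below shows, the callback is then invoked only after all results have been collected, not as individual workers finish.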

Source code in src/trade_study/runner.py
def run_grid(
    world: Simulator,
    scorer: Scorer,
    grid: list[dict[str, Any]],
    observables: list[Observable],
    *,
    annotations: list[Annotation] | None = None,
    n_jobs: int = 1,
    callback: ProgressCallback | None = None,
) -> ResultsTable:
    """Run all configurations in a grid.

    Args:
        world: Simulator that generates (truth, observations).
        scorer: Scorer that evaluates observables.
        grid: List of config dicts to evaluate.
        observables: Observable definitions (for column ordering).
        annotations: Optional external annotations (costs, etc.).
        n_jobs: Number of parallel workers (-1 for all CPUs).
        callback: Optional progress callback invoked after each trial
            with ``(trial_index, total_trials, trial_result)``.

    Returns:
        ResultsTable with scored results.
    """
    total = len(grid)
    if n_jobs == 1:
        results: list[TrialResult] = []
        for i, cfg in enumerate(grid):
            r = _run_single(world, scorer, cfg)
            results.append(r)
            if callback is not None:
                callback(i, total, r)
    else:
        from joblib import Parallel, delayed  # type: ignore[import-untyped]

        results = Parallel(n_jobs=n_jobs)(
            delayed(_run_single)(world, scorer, cfg) for cfg in grid
        )
        if callback is not None:
            for i, r in enumerate(results):
                callback(i, total, r)

    obs_names = [o.name for o in observables]
    score_matrix = np.array([
        [r.scores.get(name, np.nan) for name in obs_names] for r in results
    ])

    ann_matrix = None
    ann_names: list[str] = []
    if annotations:
        ann_names = [a.name for a in annotations]
        ann_matrix = np.array([
            [a.resolve(r.config) for a in annotations] for r in results
        ])

    return ResultsTable(
        configs=[r.config for r in results],
        scores=score_matrix,
        observable_names=obs_names,
        annotations=ann_matrix,
        annotation_names=ann_names,
        metadata=[{"wall_seconds": r.wall_seconds} for r in results],
    )
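
As the source shows, `run_grid` only needs each annotation to expose a `name` attribute and a `resolve(config)` method. A hypothetical cost annotation could therefore be sketched as below (the real `Annotation` class may provide a richer constructor); it reuses the toy objects from the previous example.

```python
# Hypothetical annotation: run_grid calls a.resolve(r.config) once per trial
# and stores the result in the table's annotation matrix.
class UnitCost:
    name = "unit_cost"

    def resolve(self, config):
        return 100.0 * config["gain"]  # toy cost model keyed off the config

table = trade_study.run_grid(
    ToyWorld(),
    ToyScorer(),
    grid,
    [error_observable],
    annotations=[UnitCost()],
)
```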

trade_study.run_adaptive(world, scorer, factors, observables, *, n_trials=100, seed=42)

Run adaptive multi-objective optimization via optuna.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `world` | `Simulator` | Simulator. | *required* |
| `scorer` | `Scorer` | Scorer for observables. | *required* |
| `factors` | `list[Factor]` | Factor definitions (from design module). | *required* |
| `observables` | `list[Observable]` | Observable definitions. | *required* |
| `n_trials` | `int` | Number of optuna trials. | `100` |
| `seed` | `int` | Random seed. | `42` |

Returns:

| Type | Description |
| --- | --- |
| `ResultsTable` | ResultsTable with scored results. |
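
A hedged usage sketch, reusing the toy simulator, scorer, and observable from the `run_grid` example above. The `Factor` keyword arguments shown here (`name`, `factor_type`, `bounds`, `levels`) are assumptions based on the attributes the source reads; check the design module for the actual constructor.

```python
import trade_study
from trade_study.design import Factor, FactorType  # FactorType import path matches the source

# Assumed keyword names; the source only reads .name, .factor_type, .bounds and .levels.
factors = [
    Factor(name="gain", factor_type=FactorType.CONTINUOUS, bounds=(0.5, 4.0)),
    Factor(name="mode", factor_type=FactorType.CATEGORICAL, levels=["fast", "accurate"]),
]

table = trade_study.run_adaptive(
    ToyWorld(),
    ToyScorer(),
    factors,
    [error_observable],
    n_trials=50,
    seed=7,
)
```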

Source code in src/trade_study/runner.py
def run_adaptive(
    world: Simulator,
    scorer: Scorer,
    factors: list[Factor],
    observables: list[Observable],
    *,
    n_trials: int = 100,
    seed: int = 42,
) -> ResultsTable:
    """Run adaptive multi-objective optimization via optuna.

    Args:
        world: Simulator.
        scorer: Scorer for observables.
        factors: Factor definitions (from design module).
        observables: Observable definitions.
        n_trials: Number of optuna trials.
        seed: Random seed.

    Returns:
        ResultsTable with scored results.
    """
    import optuna as _optuna

    from .design import FactorType

    directions_str = [
        "minimize" if o.direction == Direction.MINIMIZE else "maximize"
        for o in observables
    ]

    study = _optuna.create_study(
        directions=directions_str,
        sampler=_optuna.samplers.NSGAIISampler(seed=seed),
    )

    obs_names = [o.name for o in observables]
    obs_weights = [o.weight for o in observables]

    def objective(trial: _optuna.trial.Trial) -> tuple[float, ...]:
        config: dict[str, Any] = {}
        for f in factors:
            if f.factor_type == FactorType.CONTINUOUS and f.bounds is not None:
                config[f.name] = trial.suggest_float(
                    f.name,
                    f.bounds[0],
                    f.bounds[1],
                )
            elif f.levels is not None and f.factor_type in {
                FactorType.CATEGORICAL,
                FactorType.DISCRETE,
            }:
                config[f.name] = trial.suggest_categorical(f.name, f.levels)
        truth, observations = world.generate(config)
        scores = scorer.score(truth, observations, config)
        return tuple(
            scores.get(name, float("nan")) * w
            for name, w in zip(obs_names, obs_weights, strict=True)
        )

    _optuna.logging.set_verbosity(_optuna.logging.WARNING)
    study.optimize(objective, n_trials=n_trials)

    configs = []
    score_rows = []
    for trial in study.trials:
        configs.append(trial.params)
        score_rows.append(list(trial.values))

    return ResultsTable(
        configs=configs,
        scores=np.array(score_rows),
        observable_names=obs_names,
    )
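
The returned ResultsTable is built from `configs`, a `scores` matrix (one row per trial, one column per observable), and `observable_names`. Assuming those constructor fields are exposed as attributes, a quick way to locate the best trial per objective might look like the sketch below; `nanargmin` is only appropriate for observables that are minimized.

```python
import numpy as np

# table.scores has shape (n_trials, n_observables); NaNs mark missing scores.
best_rows = np.nanargmin(table.scores, axis=0)  # per-column best row, minimize-only
for col, name in enumerate(table.observable_names):
    row = best_rows[col]
    print(f"{name}: best value {table.scores[row, col]:.4g} with config {table.configs[row]}")
```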