98 changes: 98 additions & 0 deletions python/ray/tune/BUILD.bazel

Large diffs are not rendered by default.

@@ -18,7 +18,7 @@
 CHECKPOINT_FILENAME = "booster-checkpoint.json"


-def get_best_model_checkpoint(best_result: "ray.train.Result"):
+def get_best_model_checkpoint(best_result: "ray.tune.Result"):
     best_bst = TuneReportCheckpointCallback.get_model(
         best_result.checkpoint, filename=CHECKPOINT_FILENAME
     )
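
For reference, a minimal sketch of how the retyped helper would be used end to end. The trainable, metric name, and mode below are illustrative assumptions, not part of this diff:

    # Hypothetical usage: fetch the best booster from a finished Tune run.
    # `train_fn` and the "eval-logloss" metric are placeholder assumptions.
    from ray import tune

    def train_fn(config):
        # ... train an xgboost model, checkpointing via
        # TuneReportCheckpointCallback, then report final metrics ...
        tune.report({"eval-logloss": 0.5 / config["max_depth"]})

    tuner = tune.Tuner(train_fn, param_space={"max_depth": tune.randint(2, 8)})
    results = tuner.fit()
    best_result = results.get_best_result(metric="eval-logloss", mode="min")
    # best_bst = get_best_model_checkpoint(best_result)  # loads the saved Booster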
2 changes: 1 addition & 1 deletion python/ray/tune/examples/xgboost_example.py

@@ -66,7 +66,7 @@ def average_cv_folds(results_dict: Dict[str, List[float]]) -> Dict[str, float]:
     )


-def get_best_model_checkpoint(best_result: "ray.train.Result"):
+def get_best_model_checkpoint(best_result: "ray.tune.Result"):
     best_bst = TuneReportCheckpointCallback.get_model(
         best_result.checkpoint, filename=CHECKPOINT_FILENAME
     )
26 changes: 7 additions & 19 deletions python/ray/tune/tests/_test_multi_tenancy_run.py

@@ -2,8 +2,7 @@
 import time
 from pathlib import Path

-from ray import train, tune
-from ray.train.data_parallel_trainer import DataParallelTrainer
+from ray import tune
 from ray.tune.search import BasicVariantGenerator

 # Hang full script until this marker is deleted
@@ -48,28 +47,17 @@ def train_func(config):
     time.sleep(0.1)

     # Finish trial
-    train.report({"param": config["param"], "fixed": config["fixed"]})
+    tune.report({"param": config["param"], "fixed": config["fixed"]})


 if __name__ == "__main__":
-    trainer = DataParallelTrainer(
-        train_loop_per_worker=train_func,
-        train_loop_config={
-            "fixed": FIXED_VAL,
-        },
-        scaling_config=train.ScalingConfig(
-            num_workers=1, trainer_resources={"CPU": 0}, resources_per_worker={"CPU": 2}
-        ),
-    )
-
     tuner = tune.Tuner(
-        trainer,
+        tune.with_resources(train_func, {"CPU": 2}),
         param_space={
-            "train_loop_config": {
-                "param": tune.grid_search(VALS),
-                "delete_marker": DELETE_TRIAL_MARKER,
-                "hang_marker": HANG_TRIAL_MARKER,
-            }
+            "fixed": FIXED_VAL,
+            "param": tune.grid_search(VALS),
+            "delete_marker": DELETE_TRIAL_MARKER,
+            "hang_marker": HANG_TRIAL_MARKER,
         },
         tune_config=tune.TuneConfig(search_alg=BasicVariantGenerator(max_concurrent=1)),
     )
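
In isolation, the migration this test demonstrates: the single-worker DataParallelTrainer is replaced by a plain Tune function trainable wrapped with tune.with_resources, and train.report becomes tune.report. A minimal sketch, with illustrative config values:

    # Sketch of the post-migration pattern; config values are placeholders.
    from ray import tune

    def train_func(config):
        # Training work would go here; report final metrics via ray.tune.
        tune.report({"param": config["param"], "fixed": config["fixed"]})

    tuner = tune.Tuner(
        # Resources formerly declared on the Trainer's ScalingConfig now
        # attach directly to the function trainable.
        tune.with_resources(train_func, {"CPU": 2}),
        param_space={"fixed": 1, "param": tune.grid_search([1, 2, 3])},
    )
    tuner.fit()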
3 changes: 2 additions & 1 deletion python/ray/tune/tests/test_api_checkpoint_integration.py

@@ -25,6 +25,7 @@ def ray_start_4_cpus_2_gpus_extra():
     ray.shutdown()


+# TODO: [V2] Delete the `data_parallel` variant once V1 is fully removed.
 @pytest.mark.parametrize("trainable_type", ["class", "function", "data_parallel"])
 @pytest.mark.parametrize("patch_iter", [False, True])
 def test_checkpoint_freq_dir_name(
@@ -77,7 +78,7 @@ def train_fn(config):
             (Path(checkpoint_dir) / "data.ckpt").write_text(str(step))
             ray.tune.report(
                 {"step": step},
-                checkpoint=ray.train.Checkpoint.from_directory(
+                checkpoint=ray.tune.Checkpoint.from_directory(
                     checkpoint_dir
                 ),
             )
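
The checkpoint-reporting idiom the test now exercises, as a standalone sketch. The loop bounds, file name, and metric are illustrative placeholders:

    # Sketch: report a directory checkpoint via the ray.tune API this PR adopts.
    import os
    import tempfile

    import ray.tune

    def train_fn(config):
        for step in range(3):
            with tempfile.TemporaryDirectory() as checkpoint_dir:
                with open(os.path.join(checkpoint_dir, "data.ckpt"), "w") as f:
                    f.write(str(step))
                # Tune persists the directory before the context manager exits.
                ray.tune.report(
                    {"step": step},
                    checkpoint=ray.tune.Checkpoint.from_directory(checkpoint_dir),
                )

    ray.tune.Tuner(train_fn).fit()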
1 change: 0 additions & 1 deletion python/ray/tune/tests/test_commands.py

@@ -9,7 +9,6 @@
 import pytest

 import ray
-import ray.train
 from ray import tune
 from ray.train.tests.util import create_dict_checkpoint
 from ray.tune.cli import commands
1 change: 0 additions & 1 deletion python/ray/tune/tests/test_experiment.py

@@ -2,7 +2,6 @@
 import unittest

 import ray
-import ray.train
 from ray.tune import CheckpointConfig, register_trainable
 from ray.tune.error import TuneError
 from ray.tune.experiment import Experiment, _convert_to_experiment_list
1 change: 0 additions & 1 deletion python/ray/tune/tests/test_function_api.py

@@ -5,7 +5,6 @@
 import unittest

 import ray
-import ray.train
 from ray import tune
 from ray.air.constants import TRAINING_ITERATION
 from ray.rllib import _register_all
1 change: 0 additions & 1 deletion python/ray/tune/tests/test_remote.py

@@ -3,7 +3,6 @@
 from unittest.mock import patch

 import ray
-import ray.train
 from ray.tune import choice, register_trainable, run, run_experiments
 from ray.tune.experiment import Experiment, Trial
 from ray.tune.result import TIMESTEPS_TOTAL
1 change: 0 additions & 1 deletion python/ray/tune/tests/test_run_experiment.py

@@ -2,7 +2,6 @@
 import unittest

 import ray
-import ray.train
 from ray.tune import (
     CheckpointConfig,
     Trainable,
1 change: 0 additions & 1 deletion python/ray/tune/tests/test_tune_restore.py

@@ -15,7 +15,6 @@
 import pytest

 import ray
-import ray.train
 from ray import tune
 from ray._private.test_utils import run_string_as_driver
 from ray.exceptions import RayTaskError
1 change: 1 addition & 0 deletions python/ray/tune/tests/test_tuner.py

@@ -377,6 +377,7 @@ def test_nonserializable_trainable():
         Tuner(lambda config: print(lock))


+# TODO: [V2] Delete the `trainer` variant once V1 is fully removed.
 def _test_no_chdir(runner_type, runtime_env, use_deprecated_config=False):
     # Write a data file that we want to read in our training loop
     with open("./read.txt", "w") as f:
1 change: 1 addition & 0 deletions python/ray/tune/tests/test_tuner_restore.py

@@ -875,6 +875,7 @@ def on_trial_result(self, runner, trial, result):
     )


+# TODO: [V2] Delete the `data_parallel` variant once V1 is fully removed.
 @pytest.mark.parametrize("trainable_type", ["function", "class", "data_parallel"])
 def test_checkpoints_saved_after_resume(ray_start_2_cpus, tmp_path, trainable_type):
     """Checkpoints saved after experiment restore should pick up at the correct