diff --git a/python/paddle/distributed/auto_parallel/__init__.py b/python/paddle/distributed/auto_parallel/__init__.py index 835ca68df2d1c1..4486b3220fa4dd 100644 --- a/python/paddle/distributed/auto_parallel/__init__.py +++ b/python/paddle/distributed/auto_parallel/__init__.py @@ -14,7 +14,7 @@ from .strategy import Strategy from .process_mesh import ProcessMesh -from .engine import Engine +from .static.engine import Engine from .interface import shard_tensor from .interface import shard_op from .interface import recompute diff --git a/python/paddle/distributed/auto_parallel/dygraph/__init__.py b/python/paddle/distributed/auto_parallel/dygraph/__init__.py new file mode 100644 index 00000000000000..1ee2fa6eb06972 --- /dev/null +++ b/python/paddle/distributed/auto_parallel/dygraph/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/python/paddle/distributed/auto_parallel/interface.py b/python/paddle/distributed/auto_parallel/interface.py index 76207bc588968c..06a24b0c5433ee 100644 --- a/python/paddle/distributed/auto_parallel/interface.py +++ b/python/paddle/distributed/auto_parallel/interface.py @@ -14,11 +14,11 @@ import paddle -from .dist_context import get_default_distributed_context -from .dist_op import DistributedOperatorHelper -from .dist_tensor import DistributedTensor from .process_mesh import ProcessMesh, get_current_process_mesh -from .utils import ( +from .static.dist_context import get_default_distributed_context +from .static.dist_op import DistributedOperatorHelper +from .static.dist_tensor import DistributedTensor +from .static.utils import ( __no_shape_var_type__, convert_to_dims_mapping, verify_shard_spec, diff --git a/python/paddle/distributed/auto_parallel/process_mesh.py b/python/paddle/distributed/auto_parallel/process_mesh.py index e2ccd16aaaad4b..1c2f292e5f8614 100644 --- a/python/paddle/distributed/auto_parallel/process_mesh.py +++ b/python/paddle/distributed/auto_parallel/process_mesh.py @@ -140,12 +140,12 @@ def __init__(self, mesh=None, dim_names=None, shape=None, process_ids=None): ) # Store all process meshes - from .dist_context import get_default_distributed_context + from .static.dist_context import get_default_distributed_context default_dist_cxt = get_default_distributed_context() default_dist_cxt.add_process_mesh(self) # Add new processes to process group 0 - from .process_group import get_process_group + from .static.process_group import get_process_group pg0 = get_process_group(0) pg0.add_ranks(self.process_ids) @@ -204,14 +204,14 @@ def __enter__(self): self._old_op_size = len(cur_block.ops) def __exit__(self, exc_type, exc_value, exc_traceback): - from .dist_op import DistributedOperator - from .dist_tensor import DistributedTensor + from .static.dist_op import DistributedOperator + from .static.dist_tensor import DistributedTensor default_prog = paddle.static.default_main_program() cur_block = default_prog.current_block() new_var_names = 
list(cur_block.vars.keys()) new_op_size = len(cur_block.ops) - from .dist_context import get_default_distributed_context + from .static.dist_context import get_default_distributed_context default_dist_ctx = get_default_distributed_context() for name in new_var_names: diff --git a/python/paddle/distributed/auto_parallel/random.py b/python/paddle/distributed/auto_parallel/random.py index 5ca6d9e9ea0696..d238fd60232d07 100644 --- a/python/paddle/distributed/auto_parallel/random.py +++ b/python/paddle/distributed/auto_parallel/random.py @@ -17,7 +17,7 @@ from ..utils.log_utils import get_logger from .process_mesh import retrive_unique_id_for_process_mesh -from .utils import _get_idx_in_axis +from .static.utils import _get_idx_in_axis _logger = get_logger(logging.INFO) diff --git a/python/paddle/distributed/auto_parallel/static/__init__.py b/python/paddle/distributed/auto_parallel/static/__init__.py new file mode 100644 index 00000000000000..6f0ea85344b7e0 --- /dev/null +++ b/python/paddle/distributed/auto_parallel/static/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/python/paddle/distributed/auto_parallel/auto_align_tool.py b/python/paddle/distributed/auto_parallel/static/auto_align_tool.py similarity index 99% rename from python/paddle/distributed/auto_parallel/auto_align_tool.py rename to python/paddle/distributed/auto_parallel/static/auto_align_tool.py index 76a8db09fdcd94..2cd9e4a05d9193 100644 --- a/python/paddle/distributed/auto_parallel/auto_align_tool.py +++ b/python/paddle/distributed/auto_parallel/static/auto_align_tool.py @@ -21,11 +21,11 @@ import paddle import paddle.distributed as dist -from paddle.distributed.auto_parallel.converter import Converter -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.static.converter import Converter +from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, ) -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( is_backward_op, is_forward_op, is_loss_op, diff --git a/python/paddle/distributed/auto_parallel/callbacks.py b/python/paddle/distributed/auto_parallel/static/callbacks.py similarity index 99% rename from python/paddle/distributed/auto_parallel/callbacks.py rename to python/paddle/distributed/auto_parallel/static/callbacks.py index db7f460b0f0521..6cbfaceee34802 100644 --- a/python/paddle/distributed/auto_parallel/callbacks.py +++ b/python/paddle/distributed/auto_parallel/static/callbacks.py @@ -24,7 +24,7 @@ ProgBarLogger, ) -from .interface import CollectionNames, get_collection +from ..interface import CollectionNames, get_collection def config_callbacks( diff --git a/python/paddle/distributed/auto_parallel/cluster.py b/python/paddle/distributed/auto_parallel/static/cluster.py similarity index 99% rename from python/paddle/distributed/auto_parallel/cluster.py rename to 
python/paddle/distributed/auto_parallel/static/cluster.py index 937404369700ac..c5df57be2bf3f6 100644 --- a/python/paddle/distributed/auto_parallel/cluster.py +++ b/python/paddle/distributed/auto_parallel/static/cluster.py @@ -20,7 +20,7 @@ import paddle -from ..utils.log_utils import get_logger +from ...utils.log_utils import get_logger @unique diff --git a/python/paddle/distributed/auto_parallel/cluster_v2.py b/python/paddle/distributed/auto_parallel/static/cluster_v2.py similarity index 100% rename from python/paddle/distributed/auto_parallel/cluster_v2.py rename to python/paddle/distributed/auto_parallel/static/cluster_v2.py diff --git a/python/paddle/distributed/auto_parallel/completion.py b/python/paddle/distributed/auto_parallel/static/completion.py similarity index 99% rename from python/paddle/distributed/auto_parallel/completion.py rename to python/paddle/distributed/auto_parallel/static/completion.py index 5f2ab7e102b0b2..cd505be0289260 100644 --- a/python/paddle/distributed/auto_parallel/completion.py +++ b/python/paddle/distributed/auto_parallel/static/completion.py @@ -18,11 +18,11 @@ from paddle.distributed.fleet.meta_optimizers.common import OpRole from paddle.framework import core +from ..process_mesh import ProcessMesh, compute_compatible_process_mesh from .dist_attribute import OperatorDistAttr, TensorDistAttr from .dist_context import _node_id from .operators import find_compatible_distributed_operator_impls from .process_group import get_world_process_group -from .process_mesh import ProcessMesh, compute_compatible_process_mesh from .utils import ( __no_shape_var_type__, get_logger, @@ -1641,7 +1641,7 @@ def complete_update_annotation(self, serial_main_program): """Complete the annotation of vars and ops in the update phase for parallel program.""" # Copy the dist tensors and dist ops annotated by users from the default context # global mesh - from paddle.distributed.auto_parallel.process_group import ( + from paddle.distributed.auto_parallel.static.process_group import ( get_world_process_group, ) @@ -1895,7 +1895,7 @@ def complete_prim_annotation(self, serial_main_program=None): def _init_global_mesh_for_program(self): # Copy the dist tensors and dist ops annotated by users from the default context # global mesh - from paddle.distributed.auto_parallel.process_group import ( + from paddle.distributed.auto_parallel.static.process_group import ( get_world_process_group, ) diff --git a/python/paddle/distributed/auto_parallel/converter.py b/python/paddle/distributed/auto_parallel/static/converter.py similarity index 99% rename from python/paddle/distributed/auto_parallel/converter.py rename to python/paddle/distributed/auto_parallel/static/converter.py index 65df19ad69c174..68f571857d7cf3 100644 --- a/python/paddle/distributed/auto_parallel/converter.py +++ b/python/paddle/distributed/auto_parallel/static/converter.py @@ -19,7 +19,7 @@ import paddle -from ..utils.log_utils import get_logger +from ...utils.log_utils import get_logger class Converter: diff --git a/python/paddle/distributed/auto_parallel/cost/__init__.py b/python/paddle/distributed/auto_parallel/static/cost/__init__.py similarity index 100% rename from python/paddle/distributed/auto_parallel/cost/__init__.py rename to python/paddle/distributed/auto_parallel/static/cost/__init__.py diff --git a/python/paddle/distributed/auto_parallel/cost/base_cost.py b/python/paddle/distributed/auto_parallel/static/cost/base_cost.py similarity index 100% rename from python/paddle/distributed/auto_parallel/cost/base_cost.py 
rename to python/paddle/distributed/auto_parallel/static/cost/base_cost.py diff --git a/python/paddle/distributed/auto_parallel/cost/comm_op_cost.py b/python/paddle/distributed/auto_parallel/static/cost/comm_op_cost.py similarity index 100% rename from python/paddle/distributed/auto_parallel/cost/comm_op_cost.py rename to python/paddle/distributed/auto_parallel/static/cost/comm_op_cost.py diff --git a/python/paddle/distributed/auto_parallel/cost/comp_op_cost.py b/python/paddle/distributed/auto_parallel/static/cost/comp_op_cost.py similarity index 100% rename from python/paddle/distributed/auto_parallel/cost/comp_op_cost.py rename to python/paddle/distributed/auto_parallel/static/cost/comp_op_cost.py diff --git a/python/paddle/distributed/auto_parallel/cost/estimate_cost.py b/python/paddle/distributed/auto_parallel/static/cost/estimate_cost.py similarity index 100% rename from python/paddle/distributed/auto_parallel/cost/estimate_cost.py rename to python/paddle/distributed/auto_parallel/static/cost/estimate_cost.py diff --git a/python/paddle/distributed/auto_parallel/cost/tensor_cost.py b/python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py similarity index 97% rename from python/paddle/distributed/auto_parallel/cost/tensor_cost.py rename to python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py index 6567088cae9784..17d3b0476081af 100644 --- a/python/paddle/distributed/auto_parallel/cost/tensor_cost.py +++ b/python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py @@ -15,7 +15,9 @@ from functools import reduce import paddle -from paddle.distributed.auto_parallel.dist_tensor import DistributedTensor +from paddle.distributed.auto_parallel.static.dist_tensor import ( + DistributedTensor, +) from paddle.static import Variable from .base_cost import Cost diff --git a/python/paddle/distributed/auto_parallel/cost_model.py b/python/paddle/distributed/auto_parallel/static/cost_model.py similarity index 100% rename from python/paddle/distributed/auto_parallel/cost_model.py rename to python/paddle/distributed/auto_parallel/static/cost_model.py diff --git a/python/paddle/distributed/auto_parallel/dist_attribute.py b/python/paddle/distributed/auto_parallel/static/dist_attribute.py similarity index 100% rename from python/paddle/distributed/auto_parallel/dist_attribute.py rename to python/paddle/distributed/auto_parallel/static/dist_attribute.py diff --git a/python/paddle/distributed/auto_parallel/dist_context.py b/python/paddle/distributed/auto_parallel/static/dist_context.py similarity index 99% rename from python/paddle/distributed/auto_parallel/dist_context.py rename to python/paddle/distributed/auto_parallel/static/dist_context.py index f3418f271825a4..df774d79774c95 100644 --- a/python/paddle/distributed/auto_parallel/dist_context.py +++ b/python/paddle/distributed/auto_parallel/static/dist_context.py @@ -18,9 +18,9 @@ from paddle.distributed.passes import PassContext from paddle.framework import IrGraph, core, set_flags +from ..process_mesh import ProcessMesh from .dist_op import DistributedOperator from .dist_tensor import DistributedTensor -from .process_mesh import ProcessMesh from .utils import ( __no_shape_var_type__, _copy_dist_attr_to_cpp, diff --git a/python/paddle/distributed/auto_parallel/dist_loader.py b/python/paddle/distributed/auto_parallel/static/dist_loader.py similarity index 100% rename from python/paddle/distributed/auto_parallel/dist_loader.py rename to python/paddle/distributed/auto_parallel/static/dist_loader.py diff --git 
a/python/paddle/distributed/auto_parallel/dist_op.py b/python/paddle/distributed/auto_parallel/static/dist_op.py similarity index 100% rename from python/paddle/distributed/auto_parallel/dist_op.py rename to python/paddle/distributed/auto_parallel/static/dist_op.py diff --git a/python/paddle/distributed/auto_parallel/dist_saver.py b/python/paddle/distributed/auto_parallel/static/dist_saver.py similarity index 99% rename from python/paddle/distributed/auto_parallel/dist_saver.py rename to python/paddle/distributed/auto_parallel/static/dist_saver.py index 9e99c58d8487b0..26b9c32c92cb2f 100644 --- a/python/paddle/distributed/auto_parallel/dist_saver.py +++ b/python/paddle/distributed/auto_parallel/static/dist_saver.py @@ -23,7 +23,7 @@ import paddle from paddle.framework import core -from ..utils.log_utils import get_logger +from ...utils.log_utils import get_logger from .process_group import _g_process_group_map from .utils import get_dist_attr diff --git a/python/paddle/distributed/auto_parallel/dist_tensor.py b/python/paddle/distributed/auto_parallel/static/dist_tensor.py similarity index 100% rename from python/paddle/distributed/auto_parallel/dist_tensor.py rename to python/paddle/distributed/auto_parallel/static/dist_tensor.py diff --git a/python/paddle/distributed/auto_parallel/engine.py b/python/paddle/distributed/auto_parallel/static/engine.py similarity index 99% rename from python/paddle/distributed/auto_parallel/engine.py rename to python/paddle/distributed/auto_parallel/static/engine.py index 7a979a864200a4..4ab2d4a7c9ac6a 100644 --- a/python/paddle/distributed/auto_parallel/engine.py +++ b/python/paddle/distributed/auto_parallel/static/engine.py @@ -22,7 +22,7 @@ import numpy as np import paddle -import paddle.distributed.auto_parallel.utils as auto_utils +import paddle.distributed.auto_parallel.static.utils as auto_utils from paddle import static, utils from paddle.distributed import fleet from paddle.fluid.executor import _to_name_str @@ -32,7 +32,9 @@ from paddle.metric import Metric from paddle.static import InputSpec, Operator, Variable, global_scope -from ..utils.log_utils import get_logger +from ...utils.log_utils import get_logger +from ..interface import CollectionNames, fetch, get_collection +from ..strategy import Strategy from .callbacks import config_callbacks from .cluster import Cluster, get_default_cluster from .converter import Converter @@ -45,11 +47,9 @@ from .dist_op import DistributedOperator from .dist_saver import DistributedSaver from .helper import ProgramHelper -from .interface import CollectionNames, fetch, get_collection from .parallelizer_v2 import Parallelizer from .planner_v2 import Planner from .process_group import get_all_process_groups, new_process_group -from .strategy import Strategy class Engine: diff --git a/python/paddle/distributed/auto_parallel/graph.py b/python/paddle/distributed/auto_parallel/static/graph.py similarity index 100% rename from python/paddle/distributed/auto_parallel/graph.py rename to python/paddle/distributed/auto_parallel/static/graph.py diff --git a/python/paddle/distributed/auto_parallel/helper.py b/python/paddle/distributed/auto_parallel/static/helper.py similarity index 100% rename from python/paddle/distributed/auto_parallel/helper.py rename to python/paddle/distributed/auto_parallel/static/helper.py diff --git a/python/paddle/distributed/auto_parallel/mapper.py b/python/paddle/distributed/auto_parallel/static/mapper.py similarity index 100% rename from python/paddle/distributed/auto_parallel/mapper.py rename to 
python/paddle/distributed/auto_parallel/static/mapper.py diff --git a/python/paddle/distributed/auto_parallel/operators/__init__.py b/python/paddle/distributed/auto_parallel/static/operators/__init__.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/__init__.py rename to python/paddle/distributed/auto_parallel/static/operators/__init__.py diff --git a/python/paddle/distributed/auto_parallel/operators/common.py b/python/paddle/distributed/auto_parallel/static/operators/common.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/common.py rename to python/paddle/distributed/auto_parallel/static/operators/common.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_assign.py b/python/paddle/distributed/auto_parallel/static/operators/dist_assign.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_assign.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_assign.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py b/python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py similarity index 99% rename from python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py index 2327793e459b37..b397903ee78463 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License -from paddle.distributed.auto_parallel.process_group import ( +from paddle.distributed.auto_parallel.static.process_group import ( get_world_process_group, ) from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole diff --git a/python/paddle/distributed/auto_parallel/operators/dist_default.py b/python/paddle/distributed/auto_parallel/static/operators/dist_default.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_default.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_default.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_dropout.py b/python/paddle/distributed/auto_parallel/static/operators/dist_dropout.py similarity index 98% rename from python/paddle/distributed/auto_parallel/operators/dist_dropout.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_dropout.py index dde852e613eff5..a5af154f385b09 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_dropout.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_dropout.py @@ -18,10 +18,10 @@ from paddle.framework import core from paddle.utils import unique_name -from ...utils.log_utils import get_logger +from ....utils.log_utils import get_logger _logger = get_logger(logging.INFO) -from ..random import determinate_rng, is_enable_auto_rand_ctrl +from ...random import determinate_rng, is_enable_auto_rand_ctrl from ..utils import ( naive_set_dist_op_attr_for_program_by_mesh_and_mapping, set_var_dist_attr, diff --git a/python/paddle/distributed/auto_parallel/operators/dist_eltwise.py b/python/paddle/distributed/auto_parallel/static/operators/dist_eltwise.py similarity index 100% rename from 
python/paddle/distributed/auto_parallel/operators/dist_eltwise.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_eltwise.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_embedding.py b/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py similarity index 99% rename from python/paddle/distributed/auto_parallel/operators/dist_embedding.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py index 4f13c89bb14fd6..8e6bbae74df182 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_embedding.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py @@ -13,7 +13,7 @@ # limitations under the License from paddle.common_ops_import import check_dtype, check_variable_and_dtype -from paddle.distributed.auto_parallel.cost.comm_op_cost import ( +from paddle.distributed.auto_parallel.static.cost.comm_op_cost import ( AllreduceSumOpCost, IdentityOpCost, ) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fill_constant_batch_size_like.py b/python/paddle/distributed/auto_parallel/static/operators/dist_fill_constant_batch_size_like.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_fill_constant_batch_size_like.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_fill_constant_batch_size_like.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_flash_attn.py b/python/paddle/distributed/auto_parallel/static/operators/dist_flash_attn.py similarity index 97% rename from python/paddle/distributed/auto_parallel/operators/dist_flash_attn.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_flash_attn.py index 331bdfd25ae0ab..2812554eb0a4ab 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_flash_attn.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_flash_attn.py @@ -14,10 +14,10 @@ import logging -from ...utils.log_utils import get_logger +from ....utils.log_utils import get_logger _logger = get_logger(logging.INFO) -from ..random import determinate_rng, is_enable_auto_rand_ctrl +from ...random import determinate_rng, is_enable_auto_rand_ctrl from .common import ( DistributedOperatorImplContainer, register_distributed_operator_impl, diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fused_attention.py b/python/paddle/distributed/auto_parallel/static/operators/dist_fused_attention.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_fused_attention.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_fused_attention.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fused_dropout_add.py b/python/paddle/distributed/auto_parallel/static/operators/dist_fused_dropout_add.py similarity index 98% rename from python/paddle/distributed/auto_parallel/operators/dist_fused_dropout_add.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_fused_dropout_add.py index 12612540a9a671..a97309a587d050 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_fused_dropout_add.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_fused_dropout_add.py @@ -18,10 +18,10 @@ from paddle.framework import core from paddle.utils import unique_name -from ...utils.log_utils import get_logger +from ....utils.log_utils import get_logger _logger = get_logger(logging.INFO) -from ..random import 
determinate_rng, is_enable_auto_rand_ctrl +from ...random import determinate_rng, is_enable_auto_rand_ctrl from ..utils import ( naive_set_dist_op_attr_for_program_by_mesh_and_mapping, set_var_dist_attr, diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fused_feedforward.py b/python/paddle/distributed/auto_parallel/static/operators/dist_fused_feedforward.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_fused_feedforward.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_fused_feedforward.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_matmul.py b/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py similarity index 99% rename from python/paddle/distributed/auto_parallel/operators/dist_matmul.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py index 8825e14d9aba7d..28eed81c6bcb06 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_matmul.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py @@ -15,7 +15,7 @@ import copy from paddle.common_ops_import import check_dtype, check_variable_and_dtype -from paddle.distributed.auto_parallel.cost.comm_op_cost import ( +from paddle.distributed.auto_parallel.static.cost.comm_op_cost import ( AllreduceSumOpCost, IdentityOpCost, ) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_pnorm.py b/python/paddle/distributed/auto_parallel/static/operators/dist_pnorm.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_pnorm.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_pnorm.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py b/python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_reshape.py b/python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_reshape.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_scale.py b/python/paddle/distributed/auto_parallel/static/operators/dist_scale.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_scale.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_scale.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_shape.py b/python/paddle/distributed/auto_parallel/static/operators/dist_shape.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_shape.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_shape.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_slice.py b/python/paddle/distributed/auto_parallel/static/operators/dist_slice.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_slice.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_slice.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_softmax.py b/python/paddle/distributed/auto_parallel/static/operators/dist_softmax.py 
similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_softmax.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_softmax.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_split.py b/python/paddle/distributed/auto_parallel/static/operators/dist_split.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_split.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_split.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_transpose.py b/python/paddle/distributed/auto_parallel/static/operators/dist_transpose.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_transpose.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_transpose.py diff --git a/python/paddle/distributed/auto_parallel/operators/dist_update_loss_scaling.py b/python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py similarity index 100% rename from python/paddle/distributed/auto_parallel/operators/dist_update_loss_scaling.py rename to python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py diff --git a/python/paddle/distributed/auto_parallel/parallelizer.py b/python/paddle/distributed/auto_parallel/static/parallelizer.py similarity index 100% rename from python/paddle/distributed/auto_parallel/parallelizer.py rename to python/paddle/distributed/auto_parallel/static/parallelizer.py diff --git a/python/paddle/distributed/auto_parallel/parallelizer_v2.py b/python/paddle/distributed/auto_parallel/static/parallelizer_v2.py similarity index 99% rename from python/paddle/distributed/auto_parallel/parallelizer_v2.py rename to python/paddle/distributed/auto_parallel/static/parallelizer_v2.py index 6807016c34f172..8a5def0ec9d0e3 100644 --- a/python/paddle/distributed/auto_parallel/parallelizer_v2.py +++ b/python/paddle/distributed/auto_parallel/static/parallelizer_v2.py @@ -20,10 +20,10 @@ from paddle.static import append_backward, program_guard from paddle.utils import unique_name -from ..utils.log_utils import get_logger +from ...utils.log_utils import get_logger +from ..random import init_auto_parallel_rng from .partitioner import Partitioner from .process_group import get_world_process_group -from .random import init_auto_parallel_rng from .reshard import Resharder from .utils import set_grad_var_shape diff --git a/python/paddle/distributed/auto_parallel/partitioner.py b/python/paddle/distributed/auto_parallel/static/partitioner.py similarity index 99% rename from python/paddle/distributed/auto_parallel/partitioner.py rename to python/paddle/distributed/auto_parallel/static/partitioner.py index f542b49fdecbde..a0190c3d3c4095 100644 --- a/python/paddle/distributed/auto_parallel/partitioner.py +++ b/python/paddle/distributed/auto_parallel/static/partitioner.py @@ -15,8 +15,10 @@ import copy import paddle -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.operators.common import ( +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.operators.common import ( get_distributed_operator_impl_container, ) from paddle.framework import Program, core diff --git a/python/paddle/distributed/auto_parallel/planner.py b/python/paddle/distributed/auto_parallel/static/planner.py similarity index 100% rename from 
python/paddle/distributed/auto_parallel/planner.py rename to python/paddle/distributed/auto_parallel/static/planner.py diff --git a/python/paddle/distributed/auto_parallel/planner_v2.py b/python/paddle/distributed/auto_parallel/static/planner_v2.py similarity index 96% rename from python/paddle/distributed/auto_parallel/planner_v2.py rename to python/paddle/distributed/auto_parallel/static/planner_v2.py index efe154b19004a3..f0ac925371055b 100755 --- a/python/paddle/distributed/auto_parallel/planner_v2.py +++ b/python/paddle/distributed/auto_parallel/static/planner_v2.py @@ -18,15 +18,17 @@ import numpy as np -from paddle.distributed.auto_parallel.dist_attribute import ( +from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.dist_attribute import ( OperatorDistAttr, TensorDistAttr, ) -from paddle.distributed.auto_parallel.dist_op import DistributedOperator -from paddle.distributed.auto_parallel.dist_tensor import DistributedTensor -from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.dist_op import DistributedOperator +from paddle.distributed.auto_parallel.static.dist_tensor import ( + DistributedTensor, +) -from ..utils.log_utils import get_logger +from ...utils.log_utils import get_logger from .completion import Completer from .dist_context import get_default_distributed_context from .tuner.parallel_tuner import ParallelTuner diff --git a/python/paddle/distributed/auto_parallel/process_group.py b/python/paddle/distributed/auto_parallel/static/process_group.py similarity index 98% rename from python/paddle/distributed/auto_parallel/process_group.py rename to python/paddle/distributed/auto_parallel/static/process_group.py index e7d8a758161616..578ec21e8082b8 100644 --- a/python/paddle/distributed/auto_parallel/process_group.py +++ b/python/paddle/distributed/auto_parallel/static/process_group.py @@ -17,8 +17,8 @@ import paddle from paddle.framework import core -from ..collective import _get_global_env, _new_ring_id -from ..utils.log_utils import get_logger +from ...collective import _get_global_env, _new_ring_id +from ...utils.log_utils import get_logger from .utils import dygraph_guard logger = get_logger("INFO", __name__) diff --git a/python/paddle/distributed/auto_parallel/process_mesh_v2.py b/python/paddle/distributed/auto_parallel/static/process_mesh_v2.py similarity index 100% rename from python/paddle/distributed/auto_parallel/process_mesh_v2.py rename to python/paddle/distributed/auto_parallel/static/process_mesh_v2.py diff --git a/python/paddle/distributed/auto_parallel/reshard.py b/python/paddle/distributed/auto_parallel/static/reshard.py similarity index 100% rename from python/paddle/distributed/auto_parallel/reshard.py rename to python/paddle/distributed/auto_parallel/static/reshard.py diff --git a/python/paddle/distributed/auto_parallel/topology.py b/python/paddle/distributed/auto_parallel/static/topology.py similarity index 100% rename from python/paddle/distributed/auto_parallel/topology.py rename to python/paddle/distributed/auto_parallel/static/topology.py diff --git a/python/paddle/distributed/auto_parallel/tuner/__init__.py b/python/paddle/distributed/auto_parallel/static/tuner/__init__.py similarity index 100% rename from python/paddle/distributed/auto_parallel/tuner/__init__.py rename to python/paddle/distributed/auto_parallel/static/tuner/__init__.py diff --git a/python/paddle/distributed/auto_parallel/tuner/algorithms.py 
b/python/paddle/distributed/auto_parallel/static/tuner/algorithms.py similarity index 100% rename from python/paddle/distributed/auto_parallel/tuner/algorithms.py rename to python/paddle/distributed/auto_parallel/static/tuner/algorithms.py diff --git a/python/paddle/distributed/auto_parallel/tuner/config.py b/python/paddle/distributed/auto_parallel/static/tuner/config.py similarity index 99% rename from python/paddle/distributed/auto_parallel/tuner/config.py rename to python/paddle/distributed/auto_parallel/static/tuner/config.py index 78f94b87b360b3..28ab9536b9bccf 100644 --- a/python/paddle/distributed/auto_parallel/tuner/config.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/config.py @@ -15,7 +15,7 @@ import copy import os -from ..strategy import Strategy +from ...strategy import Strategy _tuning_supported_passes = ["sharding", "recompute"] diff --git a/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py b/python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py similarity index 97% rename from python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py rename to python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py index b3a925070b320a..8b3d23c68cb1ae 100644 --- a/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py @@ -27,16 +27,18 @@ import time import paddle -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.partitioner import Partitioner -from paddle.distributed.auto_parallel.process_group import ( +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.partitioner import Partitioner +from paddle.distributed.auto_parallel.static.process_group import ( clear_all_process_groups, get_all_process_groups, new_process_group, ) -from paddle.distributed.auto_parallel.reshard import Resharder -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.reshard import Resharder +from paddle.distributed.auto_parallel.static.utils import ( debug_program, set_grad_var_shape, ) @@ -465,7 +467,7 @@ def _launch_profile(self, ctx_path, trial_dir): ] ) cmd_args = ( - "-m paddle.distributed.auto_parallel.tuner.profiler" + "-m paddle.distributed.auto_parallel.static.tuner.profiler" + " " + profile_args ) diff --git a/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py b/python/paddle/distributed/auto_parallel/static/tuner/parallel_tuner.py similarity index 99% rename from python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py rename to python/paddle/distributed/auto_parallel/static/tuner/parallel_tuner.py index 4a3f85d6b21daf..c2c1055663ccc2 100644 --- a/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/parallel_tuner.py @@ -21,13 +21,13 @@ import numpy as np +from ...process_mesh import ProcessMesh from ..completion import Completer from ..cost import CostEstimator from ..dist_context import _node_id from ..dist_op import DistributedOperator from ..operators.common import find_compatible_distributed_operator_impls from ..parallelizer_v2 import Parallelizer -from ..process_mesh import ProcessMesh from .trial import 
Trial, TrialStatus from .tunable_space import TunableSpace from .tunable_variable import Boolean, IntRange diff --git a/python/paddle/distributed/auto_parallel/tuner/profiler.py b/python/paddle/distributed/auto_parallel/static/tuner/profiler.py similarity index 98% rename from python/paddle/distributed/auto_parallel/tuner/profiler.py rename to python/paddle/distributed/auto_parallel/static/tuner/profiler.py index 486db968ee3bef..55f83b48647aa0 100644 --- a/python/paddle/distributed/auto_parallel/tuner/profiler.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/profiler.py @@ -21,10 +21,10 @@ import traceback import paddle -from paddle.distributed.auto_parallel.dist_loader import ( +from paddle.distributed.auto_parallel.static.dist_loader import ( DistributedDataLoaderFromGenerator, ) -from paddle.distributed.auto_parallel.process_group import ( +from paddle.distributed.auto_parallel.static.process_group import ( get_all_process_groups, new_process_group, ) diff --git a/python/paddle/distributed/auto_parallel/tuner/recorder.py b/python/paddle/distributed/auto_parallel/static/tuner/recorder.py similarity index 100% rename from python/paddle/distributed/auto_parallel/tuner/recorder.py rename to python/paddle/distributed/auto_parallel/static/tuner/recorder.py diff --git a/python/paddle/distributed/auto_parallel/tuner/rule_based_tuner.py b/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py similarity index 99% rename from python/paddle/distributed/auto_parallel/tuner/rule_based_tuner.py rename to python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py index 5ef0e872933453..bef30c7ce3aa57 100644 --- a/python/paddle/distributed/auto_parallel/tuner/rule_based_tuner.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py @@ -26,20 +26,24 @@ import numpy as np import paddle -from paddle.distributed.auto_parallel.cluster_v2 import DeviceMesh -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.cost import CostEstimator -from paddle.distributed.auto_parallel.dist_attribute import ( +from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.cluster_v2 import DeviceMesh +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.cost import CostEstimator +from paddle.distributed.auto_parallel.static.dist_attribute import ( OperatorDistAttr, TensorDistAttr, ) -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.dist_tensor import DistributedTensor -from paddle.distributed.auto_parallel.process_group import ( +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.dist_tensor import ( + DistributedTensor, +) +from paddle.distributed.auto_parallel.static.process_group import ( get_world_process_group, ) -from paddle.distributed.auto_parallel.process_mesh import ProcessMesh -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( is_gradient_clip_op, print_program_with_dist_attr, ) @@ -48,7 +52,7 @@ from paddle.fluid.backward import append_backward from paddle.fluid.framework import Parameter, unique_name -from ...utils.log_utils import get_logger +from ....utils.log_utils import get_logger from ..graph import Graph _PATTERNS = {} diff --git 
a/python/paddle/distributed/auto_parallel/tuner/storable.py b/python/paddle/distributed/auto_parallel/static/tuner/storable.py similarity index 100% rename from python/paddle/distributed/auto_parallel/tuner/storable.py rename to python/paddle/distributed/auto_parallel/static/tuner/storable.py diff --git a/python/paddle/distributed/auto_parallel/tuner/trial.py b/python/paddle/distributed/auto_parallel/static/tuner/trial.py similarity index 100% rename from python/paddle/distributed/auto_parallel/tuner/trial.py rename to python/paddle/distributed/auto_parallel/static/tuner/trial.py diff --git a/python/paddle/distributed/auto_parallel/tuner/tunable_space.py b/python/paddle/distributed/auto_parallel/static/tuner/tunable_space.py similarity index 100% rename from python/paddle/distributed/auto_parallel/tuner/tunable_space.py rename to python/paddle/distributed/auto_parallel/static/tuner/tunable_space.py diff --git a/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py b/python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py similarity index 100% rename from python/paddle/distributed/auto_parallel/tuner/tunable_variable.py rename to python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py diff --git a/python/paddle/distributed/auto_parallel/utils.py b/python/paddle/distributed/auto_parallel/static/utils.py similarity index 99% rename from python/paddle/distributed/auto_parallel/utils.py rename to python/paddle/distributed/auto_parallel/static/utils.py index d5a196a080d6cc..fa9aeacd001b9c 100644 --- a/python/paddle/distributed/auto_parallel/utils.py +++ b/python/paddle/distributed/auto_parallel/static/utils.py @@ -27,8 +27,8 @@ from paddle.framework.io_utils import is_belong_to_optimizer, is_parameter from paddle.static import Variable +from ..process_mesh import ProcessMesh from .dist_attribute import OperatorDistAttr, TensorDistAttr -from .process_mesh import ProcessMesh OpRole = core.op_proto_and_checker_maker.OpRole OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName() @@ -1868,7 +1868,7 @@ def get_lr(optimizer): def initialize_pg_in_full_mode(all_process_groups, cur_rank): import socket - from ..collective import _get_global_env + from ...collective import _get_global_env has_recv_by_socket = [] # This is a magic number @@ -1946,7 +1946,7 @@ def is_recompute_op(op): def set_recompute_segments(model, losses, strategy, program): - from ..passes.auto_parallel_recompute import RecomputeState + from ...passes.auto_parallel_recompute import RecomputeState if not losses: return @@ -2054,7 +2054,7 @@ def validate_opt(optimizer): def set_data_parallel(x): - from .interface import ProcessMesh, shard_tensor + from ..interface import ProcessMesh, shard_tensor from .process_group import get_world_process_group world_ranks = get_world_process_group().ranks @@ -2095,7 +2095,7 @@ def _copy_tensor_dist_attr_to_cpp(cpp_dist_attr, py_dist_attr): def _copy_tensor_dist_attr_from_cpp(cpp_dist_attr, py_dist_attr): - from .process_mesh import ProcessMesh + from ..process_mesh import ProcessMesh cpp_process_mesh = cpp_dist_attr.process_mesh if cpp_process_mesh is not None: @@ -2128,7 +2128,7 @@ def _copy_op_dist_attr_to_cpp(cpp_dist_attr, py_dist_attr): def _copy_op_dist_attr_from_cpp(cpp_dist_attr, py_dist_attr): - from .process_mesh import ProcessMesh + from ..process_mesh import ProcessMesh cpp_process_mesh = cpp_dist_attr.process_mesh if cpp_process_mesh is not None: diff --git a/python/paddle/distributed/fleet/fleet.py 
b/python/paddle/distributed/fleet/fleet.py index 39948ab28e6ef1..de003916b7d255 100755 --- a/python/paddle/distributed/fleet/fleet.py +++ b/python/paddle/distributed/fleet/fleet.py @@ -1335,7 +1335,7 @@ def _minimize_impl( self._user_defined_strategy.semi_auto or self._user_defined_strategy.auto_search ): - from ..auto_parallel.parallelizer import AutoParallelizer + from ..auto_parallel.static.parallelizer import AutoParallelizer auto_parallelizer = AutoParallelizer(self) ( diff --git a/python/paddle/distributed/passes/auto_parallel_amp.py b/python/paddle/distributed/passes/auto_parallel_amp.py index def5156f811aa2..a6f12af17fa5b1 100644 --- a/python/paddle/distributed/passes/auto_parallel_amp.py +++ b/python/paddle/distributed/passes/auto_parallel_amp.py @@ -13,11 +13,13 @@ # limitations under the License. import paddle -from paddle.distributed.auto_parallel.dist_attribute import OperatorDistAttr -from paddle.distributed.auto_parallel.process_group import ( +from paddle.distributed.auto_parallel.static.dist_attribute import ( + OperatorDistAttr, +) +from paddle.distributed.auto_parallel.static.process_group import ( get_world_process_group, ) -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( naive_set_dist_op_attr_for_program_by_mesh_and_mapping, set_var_dist_attr, ) @@ -42,7 +44,7 @@ from paddle.utils import unique_name from ..auto_parallel.process_mesh import ProcessMesh -from ..auto_parallel.utils import ( +from ..auto_parallel.static.utils import ( is_backward_op, is_forward_op, is_loss_grad_op, diff --git a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py index 5d519bcc94e06b..a371792c5198d8 100644 --- a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py +++ b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py @@ -15,16 +15,16 @@ from collections import OrderedDict import paddle -from paddle.distributed.auto_parallel.dist_attribute import ( +from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.dist_attribute import ( OperatorDistAttr, TensorDistAttr, ) -from paddle.distributed.auto_parallel.operators.common import ( +from paddle.distributed.auto_parallel.static.operators.common import ( is_data_parallel_reduce_op, is_data_parallel_scale_op, ) -from paddle.distributed.auto_parallel.process_mesh import ProcessMesh -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( find_higher_order_backward_op, get_var_numel, insert_dependencies_for_vars, diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py index 6a763ce15030f7..8da9edb34258dc 100644 --- a/python/paddle/distributed/passes/auto_parallel_fp16.py +++ b/python/paddle/distributed/passes/auto_parallel_fp16.py @@ -16,11 +16,13 @@ import paddle from paddle.common_ops_import import check_type, check_variable_and_dtype -from paddle.distributed.auto_parallel.dist_attribute import OperatorDistAttr -from paddle.distributed.auto_parallel.process_group import ( +from paddle.distributed.auto_parallel.static.dist_attribute import ( + OperatorDistAttr, +) +from paddle.distributed.auto_parallel.static.process_group import ( get_world_process_group, ) -from paddle.distributed.auto_parallel.utils import ( +from 
paddle.distributed.auto_parallel.static.utils import ( is_backward_op, is_forward_op, naive_set_dist_op_attr_for_program_by_mesh_and_mapping, diff --git a/python/paddle/distributed/passes/auto_parallel_grad_clip.py b/python/paddle/distributed/passes/auto_parallel_grad_clip.py index 481ba3b6c31131..bda2b557fc54c1 100644 --- a/python/paddle/distributed/passes/auto_parallel_grad_clip.py +++ b/python/paddle/distributed/passes/auto_parallel_grad_clip.py @@ -19,18 +19,21 @@ import paddle from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole -from ..auto_parallel.dist_attribute import OperatorDistAttr, TensorDistAttr -from ..auto_parallel.operators.common import ( +from ..auto_parallel.process_mesh import ProcessMesh +from ..auto_parallel.static.dist_attribute import ( + OperatorDistAttr, + TensorDistAttr, +) +from ..auto_parallel.static.operators.common import ( SyncMode, is_data_parallel_reduce_op, ) -from ..auto_parallel.process_group import ( +from ..auto_parallel.static.process_group import ( get_all_process_groups, get_world_process_group, ) -from ..auto_parallel.process_mesh import ProcessMesh -from ..auto_parallel.reshard import Resharder -from ..auto_parallel.utils import ( +from ..auto_parallel.static.reshard import Resharder +from ..auto_parallel.static.utils import ( _get_comm_group, insert_dependencies_for_vars, is_gradient_clip_op, diff --git a/python/paddle/distributed/passes/auto_parallel_gradient_merge.py b/python/paddle/distributed/passes/auto_parallel_gradient_merge.py index 4bf460d1b42889..8a87ac7f599d2f 100644 --- a/python/paddle/distributed/passes/auto_parallel_gradient_merge.py +++ b/python/paddle/distributed/passes/auto_parallel_gradient_merge.py @@ -15,11 +15,11 @@ from typing import Any, Dict, List, Tuple import paddle -from paddle.distributed.auto_parallel.process_group import ( +from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.process_group import ( get_world_process_group, ) -from paddle.distributed.auto_parallel.process_mesh import ProcessMesh -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( is_optimize_op, naive_set_dist_op_attr_for_program_by_mesh_and_mapping, set_var_dist_attr, diff --git a/python/paddle/distributed/passes/auto_parallel_quantization.py b/python/paddle/distributed/passes/auto_parallel_quantization.py index f2f35b33728bb7..759e79680fc514 100644 --- a/python/paddle/distributed/passes/auto_parallel_quantization.py +++ b/python/paddle/distributed/passes/auto_parallel_quantization.py @@ -26,8 +26,11 @@ quant_config, ) -from ..auto_parallel.converter import Converter -from ..auto_parallel.dist_attribute import OperatorDistAttr, TensorDistAttr +from ..auto_parallel.static.converter import Converter +from ..auto_parallel.static.dist_attribute import ( + OperatorDistAttr, + TensorDistAttr, +) from .pass_base import PassBase, register_pass TRANSFORM_PASS_OP_TYPES = list( diff --git a/python/paddle/distributed/passes/auto_parallel_recompute.py b/python/paddle/distributed/passes/auto_parallel_recompute.py index 5de90af8e2e9ad..d64e8df305f75a 100644 --- a/python/paddle/distributed/passes/auto_parallel_recompute.py +++ b/python/paddle/distributed/passes/auto_parallel_recompute.py @@ -26,8 +26,8 @@ from paddle.framework import core from paddle.utils import unique_name -from ..auto_parallel.dist_attribute import OperatorDistAttr -from ..auto_parallel.utils import ( +from ..auto_parallel.static.dist_attribute 
import OperatorDistAttr +from ..auto_parallel.static.utils import ( get_loss_op, insert_dependencies_for_two_ops, is_backward_op, diff --git a/python/paddle/distributed/passes/auto_parallel_sharding.py b/python/paddle/distributed/passes/auto_parallel_sharding.py index 44045155cb7737..ac1d7fd8f071f6 100644 --- a/python/paddle/distributed/passes/auto_parallel_sharding.py +++ b/python/paddle/distributed/passes/auto_parallel_sharding.py @@ -16,13 +16,15 @@ from functools import reduce import paddle -from paddle.distributed.auto_parallel.operators.common import ( +from paddle.distributed.auto_parallel.static.operators.common import ( ParallelMode, is_data_parallel_reduce_op, is_parameter_related, ) -from paddle.distributed.auto_parallel.process_group import new_process_group -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.process_group import ( + new_process_group, +) +from paddle.distributed.auto_parallel.static.utils import ( _get_comm_group, get_logger, get_var_numel, diff --git a/python/paddle/distributed/passes/auto_parallel_supplement_explicit_dependencies.py b/python/paddle/distributed/passes/auto_parallel_supplement_explicit_dependencies.py index c164b6e8ddbc47..7bd4024fa70d4d 100644 --- a/python/paddle/distributed/passes/auto_parallel_supplement_explicit_dependencies.py +++ b/python/paddle/distributed/passes/auto_parallel_supplement_explicit_dependencies.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.distributed.auto_parallel.operators.common import ( +from paddle.distributed.auto_parallel.static.operators.common import ( is_amp_flag_sync_op, is_data_parallel_reduce_op, is_global_norm_sync_op, ) -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( OpRole, insert_dependencies_for_vars, ) diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py index 1635c7d5d211bb..a0864992c4e1c8 100755 --- a/python/paddle/fluid/backward.py +++ b/python/paddle/fluid/backward.py @@ -1439,7 +1439,7 @@ def find_op_index(block_desc, cur_op_desc): ) else: default_ctx = getattr( - paddle.distributed.auto_parallel.dist_context, + paddle.distributed.auto_parallel.static.dist_context, '_g_default_distributed_context', None, ) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 904a30f64fa98c..38b62736e58bba 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1681,7 +1681,7 @@ def _to_readable_code(self): if self.persistable: var_str = "persist " + var_str - from paddle.distributed.auto_parallel.dist_context import ( + from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, ) @@ -3137,7 +3137,7 @@ def _to_readable_code(self, skip_op_callstack=True): if i != len(attr_names) - 1: attrs_str += ", " - from paddle.distributed.auto_parallel.dist_context import ( + from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, ) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py b/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py index 554c578f8508cd..2a947adc030200 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py @@ -22,10 +22,10 @@ import paddle.nn.functional as F from paddle import nn, static, utils from 
paddle.distributed import fleet -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.static.dist_context import ( set_default_distributed_context, ) -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( get_dist_attr, load_checkpoint_into_program, load_distributed_checkpoint, diff --git a/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py b/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py index 1ef9634f8db2a3..3f862705fedde5 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py @@ -23,7 +23,7 @@ import paddle.nn.functional as F from paddle import nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( load_checkpoint_into_program, save_distributed_checkpoint, ) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_save_for_auto_infer.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_save_for_auto_infer.py index a2a9c9113271be..16ede226d20075 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_save_for_auto_infer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_save_for_auto_infer.py @@ -25,7 +25,7 @@ import paddle from paddle import distributed as dist from paddle.distributed import fleet -from paddle.distributed.auto_parallel import engine +from paddle.distributed.auto_parallel.static import engine from paddle.distributed.fleet.layers.mpu.mp_layers import ( ColumnParallelLinear, RowParallelLinear, diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py index d1104c2ce5931d..84606eb1216e2e 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py @@ -17,7 +17,7 @@ import tempfile import unittest -from paddle.distributed.auto_parallel.cluster import ( +from paddle.distributed.auto_parallel.static.cluster import ( Cluster, DeviceType, LinkType, diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py index 810f99e0dd7cf7..103651728f8b07 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py @@ -18,8 +18,10 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, tensor, utils -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.dist_context import DistributedContext +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) from paddle.distributed.fleet import auto paddle.enable_static() @@ -188,7 +190,7 @@ def test_mlp_dp_mp(self): # # dist_context) # dist_context.finalize_distributed_attr_for_program( # complete_train_program) - # from paddle.distributed.auto_parallel.interface import _g_process_mesh_map + # from paddle.distributed.auto_parallel.static.interface import _g_process_mesh_map # for block in complete_train_program.blocks: # for tensor in block.vars.values(): # desc = tensor.desc diff --git 
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py
index d136aa6adb54da..cc09ac989e1da1 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py
@@ -18,8 +18,10 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import nn, static, tensor, utils
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
 from paddle.distributed.fleet import auto
 from paddle.fluid import layers
 from paddle.nn.layer.transformer import _convert_param_attr_to_list
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py
index 5746df433fe465..7cf8b2d399f128 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py
@@ -18,12 +18,16 @@
 import paddle.nn.functional as F
 from paddle import nn, static, utils
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.cost_model import estimate_cost
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.reshard import Resharder
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.cost_model import estimate_cost
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.parallelizer import (
+    AutoParallelizer,
+)
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.reshard import Resharder
 from paddle.distributed.fleet import auto
 from paddle.fluid import core
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py
index 95b7f95c98ce6e..420e8b7f526e82 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py
@@ -20,12 +20,20 @@
 import paddle
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_attribute import TensorDistAttr
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.dist_tensor import DistributedTensor
-from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
-from paddle.distributed.auto_parallel.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_attribute import (
+    TensorDistAttr,
+)
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.dist_tensor import (
+    DistributedTensor,
+)
+from paddle.distributed.auto_parallel.static.parallelizer import (
+    AutoParallelizer,
+)
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
 from paddle.distributed.fleet import auto
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py
index b8628f671c022a..a9b1fa973f7754 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py
@@ -14,7 +14,7 @@
 import unittest
-from paddle.distributed.auto_parallel.graph import Graph
+from paddle.distributed.auto_parallel.static.graph import Graph
 class TestAutoParallelGraph(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py
index 11f20b68939ec8..cae7c24a1614b1 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py
@@ -23,17 +23,21 @@
 import paddle.nn.functional as F
 from paddle import fluid, nn, static, utils
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.mapper import (
+from paddle.distributed.auto_parallel.static.cluster import Cluster
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.mapper import (
     get_comm_volume,
     get_dtype_bytes,
     mapping,
 )
-from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.reshard import Resharder
+from paddle.distributed.auto_parallel.static.parallelizer import (
+    AutoParallelizer,
+)
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.reshard import Resharder
 from paddle.distributed.fleet import auto
 from paddle.fluid import core
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py
index 33db190dfc6b32..71b6a7b7a2db79 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py
@@ -19,11 +19,15 @@
 import paddle.nn.functional as F
 from paddle import nn, static, tensor, utils
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.process_group import new_process_group
-from paddle.distributed.auto_parallel.utils import _get_comm_group
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.process_group import (
+    new_process_group,
+)
+from paddle.distributed.auto_parallel.static.utils import _get_comm_group
 from paddle.distributed.fleet import auto
 paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py
index 3e058bfb18e545..038f1b4854b358 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py
@@ -18,11 +18,15 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import nn, static, tensor, utils
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.process_group import new_process_group
-from paddle.distributed.auto_parallel.utils import _get_comm_group
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.parallelizer import (
+    AutoParallelizer,
+)
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.process_group import (
+    new_process_group,
+)
+from paddle.distributed.auto_parallel.static.utils import _get_comm_group
 from paddle.distributed.fleet import auto
 from paddle.fluid import layers
 from paddle.nn.layer.transformer import _convert_param_attr_to_list
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py
index 4698667b985fd6..4af3fc831abe45 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py
@@ -18,15 +18,19 @@
 import paddle.nn.functional as F
 from paddle import nn, static, utils
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.process_group import (
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.parallelizer import (
+    AutoParallelizer,
+)
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.process_group import (
     ProcessGroup,
     _g_process_group_map,
 )
-from paddle.distributed.auto_parallel.reshard import Resharder
+from paddle.distributed.auto_parallel.static.reshard import Resharder
 from paddle.distributed.fleet import auto
 paddle.enable_static()
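Taken together, the reshard-test imports above trace the manual static-graph pipeline: complete distributed attributes, partition per rank, then reshard. A condensed end-to-end sketch, distilled from the test helpers updated in this diff; the toy program, mesh, and rank are invented for illustration, so treat the exact signatures as assumptions verified only against these tests:

```python
import paddle
from paddle import static
from paddle.distributed.auto_parallel.static.completion import Completer
from paddle.distributed.auto_parallel.static.dist_context import (
    DistributedContext,
)
from paddle.distributed.auto_parallel.static.partitioner import Partitioner
from paddle.distributed.auto_parallel.static.reshard import Resharder
from paddle.distributed.fleet import auto

paddle.enable_static()

main_program, start_program = static.Program(), static.Program()
with static.program_guard(main_program, start_program):
    x = static.data(name="x", shape=[4, 8], dtype="float32")
    # Shard the input over a toy 2-process mesh along its first axis.
    auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["d"]), ["d", None])
    y = paddle.matmul(x, paddle.create_parameter([8, 8], "float32"))

rank = 0
dist_context = DistributedContext()

# Complete distributed attributes, then split the program for this rank.
Completer(dist_context).complete_forward_annotation(main_program)
dist_context.block_state.parse_forward_blocks(main_program)
dist_main, dist_start, params_grads = Partitioner(dist_context, rank).partition(
    main_program, start_program, []
)

# Insert the communication needed to realign mismatched shards.
Resharder(dist_main, dist_start, rank, dist_context, params_grads).reshard()
```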
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py
index e59cfa1a1f12ca..b8afece8001cb5 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py
@@ -18,11 +18,15 @@
 import paddle.nn.functional as F
 from paddle import nn, static, utils
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.reshard import Resharder
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.parallelizer import (
+    AutoParallelizer,
+)
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.reshard import Resharder
 from paddle.distributed.fleet import auto
 paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py
index 33acd017292009..ebc7b95290e691 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py
@@ -18,13 +18,17 @@
 import paddle.nn.functional as F
 from paddle import nn, static, utils
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.cost import CostEstimator
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.reshard import Resharder
+from paddle.distributed.auto_parallel.static.cluster import Cluster
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.cost import CostEstimator
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.parallelizer import (
+    AutoParallelizer,
+)
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.reshard import Resharder
 from paddle.distributed.fleet import auto
 paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py
index 11c817b9baeea8..2ff75315725793 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py
@@ -22,7 +22,7 @@
 import paddle.nn.functional as F
 from paddle import nn, static, utils
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.static.dist_context import (
     get_default_distributed_context,
 )
 from paddle.distributed.fleet import auto
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py
index 277072a24e2fb4..d5bfd5889428d6 100755
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py
@@ -17,13 +17,15 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import nn, static, utils
-from paddle.distributed.auto_parallel.dist_attribute import (
+from paddle.distributed.auto_parallel.static.dist_attribute import (
     OperatorDistAttr,
     TensorDistAttr,
 )
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.planner import PlanSpace
-from paddle.distributed.auto_parallel.utils import (
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.planner import PlanSpace
+from paddle.distributed.auto_parallel.static.utils import (
     update_op_dims_mapping_by_default_dist_impl,
     update_op_dims_mapping_by_elementwise_like_dist_impl,
 )
@@ -177,8 +179,10 @@ def test_update(self):
         set_default_dist_attr(train_program, dist_context, global_process_mesh)
         ops = train_program.global_block().ops
         vars = train_program.global_block().vars
-        from paddle.distributed.auto_parallel.dist_op import DistributedOperator
-        from paddle.distributed.auto_parallel.operators.common import (
+        from paddle.distributed.auto_parallel.static.dist_op import (
+            DistributedOperator,
+        )
+        from paddle.distributed.auto_parallel.static.operators.common import (
             get_distributed_operator_impl_container,
             is_elementwise_op,
         )
diff --git a/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py b/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py
index c9d7d6346ca8f1..a1c1f86bb1f3e5 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py
@@ -16,9 +16,11 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import nn, static, utils
-from paddle.distributed.auto_parallel.dist_attribute import OperatorDistAttr
-from paddle.distributed.auto_parallel.dist_op import DistributedOperator
-from paddle.distributed.auto_parallel.operators.common import (
+from paddle.distributed.auto_parallel.static.dist_attribute import (
+    OperatorDistAttr,
+)
+from paddle.distributed.auto_parallel.static.dist_op import DistributedOperator
+from paddle.distributed.auto_parallel.static.operators.common import (
     get_distributed_operator_impl_container,
 )
 from paddle.framework import core
diff --git a/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py b/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py
index 19da767fcf9700..369fdec36e55a6 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py
@@ -16,9 +16,11 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import nn, static, utils
-from paddle.distributed.auto_parallel.dist_attribute import OperatorDistAttr
-from paddle.distributed.auto_parallel.dist_op import DistributedOperator
-from paddle.distributed.auto_parallel.operators.common import (
+from paddle.distributed.auto_parallel.static.dist_attribute import (
+    OperatorDistAttr,
+)
+from paddle.distributed.auto_parallel.static.dist_op import DistributedOperator
+from paddle.distributed.auto_parallel.static.operators.common import (
     get_distributed_operator_impl_container,
 )
 from paddle.fluid import core
diff --git a/python/setup.py.in b/python/setup.py.in
index 89acf5fe09d162..3616502cd0ab5b 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -426,9 +426,11 @@ packages=['paddle',
           'paddle.distributed.fleet.meta_parallel.sharding',
           'paddle.distributed.fleet.meta_parallel.parallel_layers',
           'paddle.distributed.auto_parallel',
-          'paddle.distributed.auto_parallel.operators',
-          'paddle.distributed.auto_parallel.tuner',
-          'paddle.distributed.auto_parallel.cost',
+          'paddle.distributed.auto_parallel.dygraph',
+          'paddle.distributed.auto_parallel.static',
+          'paddle.distributed.auto_parallel.static.operators',
+          'paddle.distributed.auto_parallel.static.tuner',
+          'paddle.distributed.auto_parallel.static.cost',
           'paddle.distributed.passes',
           'paddle.distributed.models',
           'paddle.distributed.models.moe',
diff --git a/setup.py b/setup.py
index e10308e1dffb88..cfb78743c23b6d 100644
--- a/setup.py
+++ b/setup.py
@@ -1409,9 +1409,11 @@ def get_setup_parameters():
         'paddle.distributed.fleet.meta_parallel.sharding',
         'paddle.distributed.fleet.meta_parallel.parallel_layers',
         'paddle.distributed.auto_parallel',
-        'paddle.distributed.auto_parallel.operators',
-        'paddle.distributed.auto_parallel.tuner',
-        'paddle.distributed.auto_parallel.cost',
+        'paddle.distributed.auto_parallel.dygraph',
+        'paddle.distributed.auto_parallel.static',
+        'paddle.distributed.auto_parallel.static.operators',
+        'paddle.distributed.auto_parallel.static.tuner',
+        'paddle.distributed.auto_parallel.static.cost',
         'paddle.distributed.passes',
         'paddle.distributed.models',
         'paddle.distributed.models.moe',
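The two packaging hunks above must stay in sync with the wheel layout. A quick import smoke check (a hypothetical helper, not part of this PR) can confirm the relocated subpackages ship correctly:

```python
# Hypothetical post-install check that the new subpackages resolve.
import importlib

for pkg in (
    "paddle.distributed.auto_parallel.dygraph",
    "paddle.distributed.auto_parallel.static",
    "paddle.distributed.auto_parallel.static.operators",
    "paddle.distributed.auto_parallel.static.tuner",
    "paddle.distributed.auto_parallel.static.cost",
):
    importlib.import_module(pkg)  # raises ImportError if packaging regressed
```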
{loss_print[0]:f}") + print(f"step: {step}, loss: {loss_print:f}") else: exe.run( distributed_main_program, diff --git a/test/auto_parallel/auto_parallel_relaunch_with_planner.py b/test/auto_parallel/auto_parallel_relaunch_with_planner.py index 00b769d8c7d08e..4ad1dfb196581d 100644 --- a/test/auto_parallel/auto_parallel_relaunch_with_planner.py +++ b/test/auto_parallel/auto_parallel_relaunch_with_planner.py @@ -15,9 +15,9 @@ import paddle from paddle import static from paddle.distributed import fleet -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.cost import CostEstimator -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.cost import CostEstimator +from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, ) diff --git a/test/auto_parallel/converter.py b/test/auto_parallel/converter.py index 5e0506c3785db5..411900eaa42cc7 100644 --- a/test/auto_parallel/converter.py +++ b/test/auto_parallel/converter.py @@ -15,7 +15,7 @@ import numpy as np import paddle -from paddle.distributed.auto_parallel.converter import Converter +from paddle.distributed.auto_parallel.static.converter import Converter def test_convert(): diff --git a/test/auto_parallel/test_align_tool.py b/test/auto_parallel/test_align_tool.py index c0c331b0d7fc48..500b11c78916ce 100644 --- a/test/auto_parallel/test_align_tool.py +++ b/test/auto_parallel/test_align_tool.py @@ -20,7 +20,9 @@ import paddle from paddle import fluid, nn, optimizer, static -from paddle.distributed.auto_parallel.auto_align_tool import AutoAlignTool +from paddle.distributed.auto_parallel.static.auto_align_tool import ( + AutoAlignTool, +) from paddle.vision.datasets import MNIST warnings.filterwarnings("ignore") diff --git a/test/auto_parallel/test_base_cost.py b/test/auto_parallel/test_base_cost.py index 01a488e2db3409..c9e3e64c6a8dfe 100644 --- a/test/auto_parallel/test_base_cost.py +++ b/test/auto_parallel/test_base_cost.py @@ -23,21 +23,25 @@ import paddle.nn.functional as F from paddle import nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.cost import ( +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.cost import ( AllreduceSumOpCost, _g_op_cost_factory, ) -from paddle.distributed.auto_parallel.cost.base_cost import ( +from paddle.distributed.auto_parallel.static.cost.base_cost import ( build_comm_costs_from_descs, build_comm_desc_from_dist_op, build_comp_costs_from_descs, build_comp_desc_from_dist_op, build_dp_costs, ) -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.parallelizer import ( + AutoParallelizer, +) from paddle.distributed.fleet import auto paddle.enable_static() diff --git a/test/auto_parallel/test_cluster.py b/test/auto_parallel/test_cluster.py index c25b6013fa1624..679b3f8a3cd0d3 100644 --- a/test/auto_parallel/test_cluster.py +++ b/test/auto_parallel/test_cluster.py @@ 
diff --git a/test/auto_parallel/test_cluster.py b/test/auto_parallel/test_cluster.py
index c25b6013fa1624..679b3f8a3cd0d3 100644
--- a/test/auto_parallel/test_cluster.py
+++ b/test/auto_parallel/test_cluster.py
@@ -17,7 +17,7 @@
 import tempfile
 import unittest
-from paddle.distributed.auto_parallel.cluster import (
+from paddle.distributed.auto_parallel.static.cluster import (
     Cluster,
     get_default_cluster,
 )
diff --git a/test/auto_parallel/test_cluster_partition.py b/test/auto_parallel/test_cluster_partition.py
index 9071b481eb5c47..25087ff16271a1 100644
--- a/test/auto_parallel/test_cluster_partition.py
+++ b/test/auto_parallel/test_cluster_partition.py
@@ -18,7 +18,7 @@ class TestClusterPartition(unittest.TestCase):
     def test_cluster_partition(self):
         clusters = [(5, 8), (1, 8), (4, 8), (16, 8), (2, 8), (3, 8)]
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             ClusterPartitionUtil,
         )
diff --git a/test/auto_parallel/test_cluster_v2.py b/test/auto_parallel/test_cluster_v2.py
index 3f10fb95b846a4..671db9708e654d 100644
--- a/test/auto_parallel/test_cluster_v2.py
+++ b/test/auto_parallel/test_cluster_v2.py
@@ -14,7 +14,7 @@
 import unittest
-from paddle.distributed.auto_parallel.cluster_v2 import DeviceMesh
+from paddle.distributed.auto_parallel.static.cluster_v2 import DeviceMesh
 from paddle.framework import core
diff --git a/test/auto_parallel/test_comm_cost.py b/test/auto_parallel/test_comm_cost.py
index 0f664947f2760c..734cbf8ff6a115 100644
--- a/test/auto_parallel/test_comm_cost.py
+++ b/test/auto_parallel/test_comm_cost.py
@@ -20,8 +20,8 @@
 from test_cluster import cluster_json, multi_cluster_json
 import paddle
-from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.cost import (
+from paddle.distributed.auto_parallel.static.cluster import Cluster
+from paddle.distributed.auto_parallel.static.cost import (
     AllgatherOpCost,
     AllreduceSumOpCost,
     BroadcastOpCost,
diff --git a/test/auto_parallel/test_comp_cost.py b/test/auto_parallel/test_comp_cost.py
index c4e4502e502637..7afb077b7e186c 100644
--- a/test/auto_parallel/test_comp_cost.py
+++ b/test/auto_parallel/test_comp_cost.py
@@ -18,8 +18,8 @@
 from test_cluster import cluster_json
-from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
+from paddle.distributed.auto_parallel.static.cluster import Cluster
+from paddle.distributed.auto_parallel.static.cost.comp_op_cost import (
     AssignOpCost,
     AssignValueOpCost,
     BeamSearchDecodeOpCost,
diff --git a/test/auto_parallel/test_convert_to_process_meshes.py b/test/auto_parallel/test_convert_to_process_meshes.py
index 120a7ba438a406..472719aef56a69 100644
--- a/test/auto_parallel/test_convert_to_process_meshes.py
+++ b/test/auto_parallel/test_convert_to_process_meshes.py
@@ -18,7 +18,7 @@ class TestConvertToProcessMeshes(unittest.TestCase):
     def test_convert_to_process_meshes(self):
         device_meshes = [[1, 8], [4, 8], [15, 8]]
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             convert_to_process_meshes,
         )
diff --git a/test/auto_parallel/test_converter.py b/test/auto_parallel/test_converter.py
index edd888acf69847..f6b95011fc9f34 100644
--- a/test/auto_parallel/test_converter.py
+++ b/test/auto_parallel/test_converter.py
@@ -18,7 +18,7 @@
 import tempfile
 import unittest
-from paddle.distributed.auto_parallel.converter import Converter
+from paddle.distributed.auto_parallel.static.converter import Converter
 class TestConverter(unittest.TestCase):
diff --git a/test/auto_parallel/test_dist_assign.py b/test/auto_parallel/test_dist_assign.py
index 87064a45a49ffb..b7cdb0d6b7f0f8 100644
--- a/test/auto_parallel/test_dist_assign.py
+++ b/test/auto_parallel/test_dist_assign.py
@@ -38,9 +38,11 @@ def make_program():
 def parallelizer(program_func, rank):
-    from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.dist_context import DistributedContext
-    from paddle.distributed.auto_parallel.partitioner import Partitioner
+    from paddle.distributed.auto_parallel.static.completion import Completer
+    from paddle.distributed.auto_parallel.static.dist_context import (
+        DistributedContext,
+    )
+    from paddle.distributed.auto_parallel.static.partitioner import Partitioner
     main_program, start_program = program_func()
diff --git a/test/auto_parallel/test_dist_attr_v2.py b/test/auto_parallel/test_dist_attr_v2.py
index 1d15c34221f908..37f13f5af9d422 100644
--- a/test/auto_parallel/test_dist_attr_v2.py
+++ b/test/auto_parallel/test_dist_attr_v2.py
@@ -21,12 +21,12 @@
 import paddle.nn.functional as F
 from paddle import nn, static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.dist_context import (
     DistributedContext,
     set_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
-from paddle.distributed.auto_parallel.utils import (
+from paddle.distributed.auto_parallel.static.utils import (
     _copy_dist_attr_from_cpp,
     _copy_dist_attr_from_cpp_for_graph,
     _copy_dist_attr_to_cpp,
diff --git a/test/auto_parallel/test_dist_context.py b/test/auto_parallel/test_dist_context.py
index 2944b2db2a3fb2..695949fd698c0f 100644
--- a/test/auto_parallel/test_dist_context.py
+++ b/test/auto_parallel/test_dist_context.py
@@ -21,7 +21,9 @@
 import paddle.nn.functional as F
 from paddle import nn, static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
 from paddle.distributed.fleet import auto
 paddle.enable_static()
diff --git a/test/auto_parallel/test_dist_matmul.py b/test/auto_parallel/test_dist_matmul.py
index 0a07b98de705d8..77c15942709c25 100644
--- a/test/auto_parallel/test_dist_matmul.py
+++ b/test/auto_parallel/test_dist_matmul.py
@@ -103,9 +103,11 @@ def matmulv2_dp2mp2(init_x, init_y, trans_x, trans_y):
 def parallelizer(program_func, *args, **kwargs):
-    from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.dist_context import DistributedContext
-    from paddle.distributed.auto_parallel.partitioner import Partitioner
+    from paddle.distributed.auto_parallel.static.completion import Completer
+    from paddle.distributed.auto_parallel.static.dist_context import (
+        DistributedContext,
+    )
+    from paddle.distributed.auto_parallel.static.partitioner import Partitioner
     main_program, start_program, loss = program_func(*args, **kwargs)
diff --git a/test/auto_parallel/test_dist_op_cost.py b/test/auto_parallel/test_dist_op_cost.py
index ecff2bbf8935b5..4d7cca7e5b3329 100644
--- a/test/auto_parallel/test_dist_op_cost.py
+++ b/test/auto_parallel/test_dist_op_cost.py
@@ -16,8 +16,8 @@
 import unittest
 import paddle
-from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.operators.common import (
+from paddle.distributed.auto_parallel.static.cluster import Cluster
+from paddle.distributed.auto_parallel.static.operators.common import (
     get_distributed_operator_impl_container,
     is_elementwise_op,
 )
@@ -29,8 +29,10 @@ def parallelizer(program_func, rank):
-    from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.dist_context import DistributedContext
+    from paddle.distributed.auto_parallel.static.completion import Completer
+    from paddle.distributed.auto_parallel.static.dist_context import (
+        DistributedContext,
+    )
     main_program, startup_program, loss = program_func()
diff --git a/test/auto_parallel/test_dist_pnorm.py b/test/auto_parallel/test_dist_pnorm.py
index 5ff30d27b6d6e5..623114208150c5 100644
--- a/test/auto_parallel/test_dist_pnorm.py
+++ b/test/auto_parallel/test_dist_pnorm.py
@@ -75,9 +75,11 @@ def make_program_serial():
 def parallelizer(program_func, rank):
-    from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.dist_context import DistributedContext
-    from paddle.distributed.auto_parallel.partitioner import Partitioner
+    from paddle.distributed.auto_parallel.static.completion import Completer
+    from paddle.distributed.auto_parallel.static.dist_context import (
+        DistributedContext,
+    )
+    from paddle.distributed.auto_parallel.static.partitioner import Partitioner
     main_program, start_program, loss = program_func()
diff --git a/test/auto_parallel/test_dist_reshape.py b/test/auto_parallel/test_dist_reshape.py
index 8dd84da9175c41..743cda599e40a3 100644
--- a/test/auto_parallel/test_dist_reshape.py
+++ b/test/auto_parallel/test_dist_reshape.py
@@ -37,9 +37,11 @@ def make_program_dp2():
 def parallelizer(program_func, rank):
-    from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.dist_context import DistributedContext
-    from paddle.distributed.auto_parallel.partitioner import Partitioner
+    from paddle.distributed.auto_parallel.static.completion import Completer
+    from paddle.distributed.auto_parallel.static.dist_context import (
+        DistributedContext,
+    )
+    from paddle.distributed.auto_parallel.static.partitioner import Partitioner
     main_program, start_program = program_func()
diff --git a/test/auto_parallel/test_dist_scale.py b/test/auto_parallel/test_dist_scale.py
index b68131e361ec04..270f6951ece2ef 100644
--- a/test/auto_parallel/test_dist_scale.py
+++ b/test/auto_parallel/test_dist_scale.py
@@ -34,9 +34,11 @@ def make_program():
 def parallelizer(program_func, rank):
-    from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.dist_context import DistributedContext
-    from paddle.distributed.auto_parallel.partitioner import Partitioner
+    from paddle.distributed.auto_parallel.static.completion import Completer
+    from paddle.distributed.auto_parallel.static.dist_context import (
+        DistributedContext,
+    )
+    from paddle.distributed.auto_parallel.static.partitioner import Partitioner
     main_program, start_program = program_func()
diff --git a/test/auto_parallel/test_dist_shape.py b/test/auto_parallel/test_dist_shape.py
index 0322a817934fdf..6bc33e82dac8df 100644
--- a/test/auto_parallel/test_dist_shape.py
+++ b/test/auto_parallel/test_dist_shape.py
@@ -34,9 +34,11 @@ def make_program():
 def parallelizer(program_func, rank):
-    from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.dist_context import DistributedContext
-    from paddle.distributed.auto_parallel.partitioner import Partitioner
+    from paddle.distributed.auto_parallel.static.completion import Completer
+    from paddle.distributed.auto_parallel.static.dist_context import (
+        DistributedContext,
+    )
+    from paddle.distributed.auto_parallel.static.partitioner import Partitioner
     main_program, start_program = program_func()
diff --git a/test/auto_parallel/test_dist_slice.py b/test/auto_parallel/test_dist_slice.py
index cdca9904d622b7..e94dcf32f7bf9c 100644
--- a/test/auto_parallel/test_dist_slice.py
+++ b/test/auto_parallel/test_dist_slice.py
@@ -56,9 +56,11 @@ def make_program_serial():
 def parallelizer(program_func, rank):
-    from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.dist_context import DistributedContext
-    from paddle.distributed.auto_parallel.partitioner import Partitioner
+    from paddle.distributed.auto_parallel.static.completion import Completer
+    from paddle.distributed.auto_parallel.static.dist_context import (
+        DistributedContext,
+    )
+    from paddle.distributed.auto_parallel.static.partitioner import Partitioner
     main_program, start_program = program_func()
diff --git a/test/auto_parallel/test_dist_split.py b/test/auto_parallel/test_dist_split.py
index edc711ea4c8410..b44d180685edd8 100644
--- a/test/auto_parallel/test_dist_split.py
+++ b/test/auto_parallel/test_dist_split.py
@@ -34,9 +34,11 @@ def make_program_dp2():
 def parallelizer(program_func, rank):
-    from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.dist_context import DistributedContext
-    from paddle.distributed.auto_parallel.partitioner import Partitioner
+    from paddle.distributed.auto_parallel.static.completion import Completer
+    from paddle.distributed.auto_parallel.static.dist_context import (
+        DistributedContext,
+    )
+    from paddle.distributed.auto_parallel.static.partitioner import Partitioner
     main_program, start_program = program_func()
diff --git a/test/auto_parallel/test_engine_callbacks.py b/test/auto_parallel/test_engine_callbacks.py
index d62cff86245816..f00d62cc035bf1 100644
--- a/test/auto_parallel/test_engine_callbacks.py
+++ b/test/auto_parallel/test_engine_callbacks.py
@@ -20,7 +20,7 @@
 import paddle
 import paddle.vision.transforms as T
-from paddle.distributed.auto_parallel.callbacks import config_callbacks
+from paddle.distributed.auto_parallel.static.callbacks import config_callbacks
 from paddle.distributed.fleet import auto
 from paddle.static import InputSpec
 from paddle.vision.datasets import MNIST
diff --git a/test/auto_parallel/test_fp16_assign.py b/test/auto_parallel/test_fp16_assign.py
index eb34226ac89187..b1a13d81148f07 100644
--- a/test/auto_parallel/test_fp16_assign.py
+++ b/test/auto_parallel/test_fp16_assign.py
@@ -64,9 +64,11 @@ def make_program():
 def parallelizer(program_func, rank):
-    from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.dist_context import DistributedContext
-    from paddle.distributed.auto_parallel.partitioner import Partitioner
+    from paddle.distributed.auto_parallel.static.completion import Completer
+    from paddle.distributed.auto_parallel.static.dist_context import (
+        DistributedContext,
+    )
+    from paddle.distributed.auto_parallel.static.partitioner import Partitioner
     main_program, start_program = program_func()
diff --git a/test/auto_parallel/test_group_operators.py b/test/auto_parallel/test_group_operators.py
index 6dea719a11180a..aec75934e5e900 100644
--- a/test/auto_parallel/test_group_operators.py
+++ b/test/auto_parallel/test_group_operators.py
@@ -112,10 +112,10 @@ def test_gpt(self):
             sequence_len,
             vocab_size,
         )
-        from paddle.distributed.auto_parallel.dist_context import (
+        from paddle.distributed.auto_parallel.static.dist_context import (
             DistributedContext,
         )
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             RuleBasedTuner,
         )
diff --git a/test/auto_parallel/test_interface.py b/test/auto_parallel/test_interface.py
index 3d57049410acf5..5ea4209a6253b8 100644
--- a/test/auto_parallel/test_interface.py
+++ b/test/auto_parallel/test_interface.py
@@ -17,10 +17,10 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import nn, static
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.dist_context import (
     get_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
 from paddle.distributed.fleet import auto
 paddle.enable_static()
diff --git a/test/auto_parallel/test_new_cost_model.py b/test/auto_parallel/test_new_cost_model.py
index 8439df7ae88bb2..b3e9016e4d20b7 100644
--- a/test/auto_parallel/test_new_cost_model.py
+++ b/test/auto_parallel/test_new_cost_model.py
@@ -20,10 +20,10 @@
 from test_cluster import cluster_json
 import paddle
-import paddle.distributed.auto_parallel.cost as cost_model
-from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.cost import CommContext
-from paddle.distributed.auto_parallel.cost.base_cost import (
+import paddle.distributed.auto_parallel.static.cost as cost_model
+from paddle.distributed.auto_parallel.static.cluster import Cluster
+from paddle.distributed.auto_parallel.static.cost import CommContext
+from paddle.distributed.auto_parallel.static.cost.base_cost import (
     build_comp_desc_from_op,
     build_comp_desc_str_for_predict,
     calc_time_by_modeling,
diff --git a/test/auto_parallel/test_parallel_tuner.py b/test/auto_parallel/test_parallel_tuner.py
index 258bf0c398b2a5..76203cbfc9ad98 100644
--- a/test/auto_parallel/test_parallel_tuner.py
+++ b/test/auto_parallel/test_parallel_tuner.py
@@ -18,13 +18,15 @@
 import paddle
 from paddle import static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.cluster import Cluster
+from paddle.distributed.auto_parallel.static.dist_context import (
     DistributedContext,
     set_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
-from paddle.distributed.auto_parallel.tuner.parallel_tuner import ParallelTuner
+from paddle.distributed.auto_parallel.static.tuner.parallel_tuner import (
+    ParallelTuner,
+)
 sys.path.append("../legacy_test")
 import auto_parallel_gpt_model as modeling
diff --git a/test/auto_parallel/test_parallel_tuner_full.py b/test/auto_parallel/test_parallel_tuner_full.py
index 7df76ef097e064..181f77b0eb9dde 100644
--- a/test/auto_parallel/test_parallel_tuner_full.py
+++ b/test/auto_parallel/test_parallel_tuner_full.py
@@ -18,15 +18,17 @@
 import paddle
 from paddle import static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.cluster import Cluster
+from paddle.distributed.auto_parallel.static.dist_context import (
     DistributedContext,
     set_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.planner_v2 import Planner
-from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.planner_v2 import Planner
+from paddle.distributed.auto_parallel.static.tuner.parallel_tuner import (
+    ParallelTuner,
+)
 from paddle.distributed.auto_parallel.strategy import Strategy
-from paddle.distributed.auto_parallel.tuner.parallel_tuner import ParallelTuner
 sys.path.append("../legacy_test")
 import auto_parallel_gpt_model as modeling
diff --git a/test/auto_parallel/test_parallel_tuner_predict.py b/test/auto_parallel/test_parallel_tuner_predict.py
index 1e3c6ea87e8f25..63b9186c0c8acd 100644
--- a/test/auto_parallel/test_parallel_tuner_predict.py
+++ b/test/auto_parallel/test_parallel_tuner_predict.py
@@ -18,13 +18,15 @@
 import paddle
 from paddle import static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.cluster import Cluster
+from paddle.distributed.auto_parallel.static.dist_context import (
     DistributedContext,
     set_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
-from paddle.distributed.auto_parallel.tuner.parallel_tuner import ParallelTuner
+from paddle.distributed.auto_parallel.static.tuner.parallel_tuner import (
+    ParallelTuner,
+)
 sys.path.append("../legacy_test")
 import auto_parallel_gpt_model as modeling
diff --git a/test/auto_parallel/test_pattern.py b/test/auto_parallel/test_pattern.py
index bdccc68d984fc5..1f7e89c08c52ca 100644
--- a/test/auto_parallel/test_pattern.py
+++ b/test/auto_parallel/test_pattern.py
@@ -112,7 +112,7 @@ def test_gpt(self):
             sequence_len,
             vocab_size,
         )
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             _PATTERNS,
             GraphUtil,
         )
diff --git a/test/auto_parallel/test_pattern_match.py b/test/auto_parallel/test_pattern_match.py
index c240969ef9ddc6..0bbf7af68a0cc7 100644
--- a/test/auto_parallel/test_pattern_match.py
+++ b/test/auto_parallel/test_pattern_match.py
@@ -112,10 +112,10 @@ def test_gpt(self):
             sequence_len,
             vocab_size,
         )
-        from paddle.distributed.auto_parallel.dist_context import (
+        from paddle.distributed.auto_parallel.static.dist_context import (
             DistributedContext,
         )
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             GraphUtil,
             RuleBasedTuner,
         )
diff --git a/test/auto_parallel/test_prim_dist_op.py b/test/auto_parallel/test_prim_dist_op.py
index 5a4a1b5a512a3d..b92f550d41fa30 100644
--- a/test/auto_parallel/test_prim_dist_op.py
+++ b/test/auto_parallel/test_prim_dist_op.py
@@ -15,13 +15,13 @@
 import unittest
 import paddle
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
     DistributedContext,
     get_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.utils import set_var_dist_attr
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.utils import set_var_dist_attr
 from paddle.distributed.fleet import auto
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.incubate.autograd import enable_prim
diff --git a/test/auto_parallel/test_process_mesh.py b/test/auto_parallel/test_process_mesh.py
index 07da754e7970f1..d4b91a5dcc345d 100644
--- a/test/auto_parallel/test_process_mesh.py
+++ b/test/auto_parallel/test_process_mesh.py
@@ -19,14 +19,14 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import nn, static
-from paddle.distributed.auto_parallel.dist_context import (
-    get_default_distributed_context,
-)
 from paddle.distributed.auto_parallel.process_mesh import (
     ProcessMesh,
     compute_compatible_process_mesh,
     merge_process_meshes,
 )
+from paddle.distributed.auto_parallel.static.dist_context import (
+    get_default_distributed_context,
+)
 paddle.enable_static()
diff --git a/test/auto_parallel/test_process_mesh_v2.py b/test/auto_parallel/test_process_mesh_v2.py
index 03ec95c71870f7..0d98caad3a7bf0 100644
--- a/test/auto_parallel/test_process_mesh_v2.py
+++ b/test/auto_parallel/test_process_mesh_v2.py
@@ -14,7 +14,7 @@
 import unittest
-from paddle.distributed.auto_parallel.process_mesh_v2 import (
+from paddle.distributed.auto_parallel.static.process_mesh_v2 import (
     ProcessMesh,
     compute_compatible_process_mesh,
     merge_process_mesh,
diff --git a/test/auto_parallel/test_recorder.py b/test/auto_parallel/test_recorder.py
index eaaefcbe0733c2..185d3d3ef3d508 100644
--- a/test/auto_parallel/test_recorder.py
+++ b/test/auto_parallel/test_recorder.py
@@ -16,7 +16,7 @@
 import numpy as np
-from paddle.distributed.auto_parallel.tuner import recorder as rd
+from paddle.distributed.auto_parallel.static.tuner import recorder as rd
 class TestRecorder(unittest.TestCase):
diff --git a/test/auto_parallel/test_rule_based_tuner.py b/test/auto_parallel/test_rule_based_tuner.py
index a3ef694b5c3628..7c4c980fd992eb 100644
--- a/test/auto_parallel/test_rule_based_tuner.py
+++ b/test/auto_parallel/test_rule_based_tuner.py
@@ -112,11 +112,11 @@ def test_gpt(self):
             sequence_len,
             vocab_size,
         )
-        from paddle.distributed.auto_parallel.cluster import Cluster
-        from paddle.distributed.auto_parallel.dist_context import (
+        from paddle.distributed.auto_parallel.static.cluster import Cluster
+        from paddle.distributed.auto_parallel.static.dist_context import (
             DistributedContext,
         )
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             RuleBasedTuner,
         )
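A detail worth noting in the hunks above: `process_mesh` stays at the package root while `dist_context`, the cluster, and the tuners move under `static`, presumably because a mesh describes processes independently of graph mode. A minimal usage sketch (the mesh values are arbitrary):

```python
from paddle.distributed.auto_parallel.process_mesh import ProcessMesh

# A 2x2 mesh: axis "dp" for data parallelism, axis "mp" for model
# parallelism; ranks 0-3 are illustrative.
mesh = ProcessMesh([[0, 1], [2, 3]], dim_names=["dp", "mp"])
print(mesh.shape)        # [2, 2]
print(mesh.process_ids)  # [0, 1, 2, 3]
```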
diff --git a/test/auto_parallel/test_rule_based_tuner_o2.py b/test/auto_parallel/test_rule_based_tuner_o2.py
index 999535d7204480..5fdb1fc83e96af 100644
--- a/test/auto_parallel/test_rule_based_tuner_o2.py
+++ b/test/auto_parallel/test_rule_based_tuner_o2.py
@@ -112,11 +112,11 @@ def test_gpt_o2(self):
             sequence_len,
             vocab_size,
         )
-        from paddle.distributed.auto_parallel.cluster import Cluster
-        from paddle.distributed.auto_parallel.dist_context import (
+        from paddle.distributed.auto_parallel.static.cluster import Cluster
+        from paddle.distributed.auto_parallel.static.dist_context import (
             DistributedContext,
         )
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             RuleBasedTuner,
         )
diff --git a/test/auto_parallel/test_serialization.py b/test/auto_parallel/test_serialization.py
index d89c9596f4cdb0..495f3adf620243 100644
--- a/test/auto_parallel/test_serialization.py
+++ b/test/auto_parallel/test_serialization.py
@@ -20,11 +20,11 @@
 import paddle.nn.functional as F
 from paddle import nn, static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.static.dist_context import (
     DistributedContext,
     set_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.process_mesh_v2 import ProcessMesh
+from paddle.distributed.auto_parallel.static.process_mesh_v2 import ProcessMesh
 from paddle.distributed.fleet import auto
 from paddle.fluid.core import TensorDistAttr
 from paddle.fluid.framework import Program
diff --git a/test/auto_parallel/test_to_static.py b/test/auto_parallel/test_to_static.py
index 2057d509ad1053..1550c2d2669f00 100644
--- a/test/auto_parallel/test_to_static.py
+++ b/test/auto_parallel/test_to_static.py
@@ -19,7 +19,10 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import LazyGuard, nn
-from paddle.distributed.auto_parallel.helper import ProgramHelper, ProxyLayer
+from paddle.distributed.auto_parallel.static.helper import (
+    ProgramHelper,
+    ProxyLayer,
+)
 from paddle.distributed.fleet import auto
 from paddle.framework import in_dynamic_mode
 from paddle.io import Dataset
diff --git a/test/auto_parallel/test_topology.py b/test/auto_parallel/test_topology.py
index 6807d22ffc3f15..0119821532e263 100644
--- a/test/auto_parallel/test_topology.py
+++ b/test/auto_parallel/test_topology.py
@@ -14,7 +14,7 @@
 import unittest
-from paddle.distributed.auto_parallel.topo import SingleNodeTopology
+from paddle.distributed.auto_parallel.static.topo import SingleNodeTopology
 def check_empty_json_object(json_object):
diff --git a/test/auto_parallel/test_trial.py b/test/auto_parallel/test_trial.py
index 5fcf38b2e65e6c..7861ab82f8f887 100644
--- a/test/auto_parallel/test_trial.py
+++ b/test/auto_parallel/test_trial.py
@@ -14,8 +14,8 @@
 import unittest
-from paddle.distributed.auto_parallel.tuner import trial as tr
-from paddle.distributed.auto_parallel.tuner import tunable_space as ts
+from paddle.distributed.auto_parallel.static.tuner import trial as tr
+from paddle.distributed.auto_parallel.static.tuner import tunable_space as ts
 class TestTiral(unittest.TestCase):
diff --git a/test/auto_parallel/test_tunable_space.py b/test/auto_parallel/test_tunable_space.py
index badc90275fd38a..b32e96107b54d4 100644
--- a/test/auto_parallel/test_tunable_space.py
+++ b/test/auto_parallel/test_tunable_space.py
@@ -14,7 +14,7 @@
 import unittest
-from paddle.distributed.auto_parallel.tuner import tunable_space as ts
+from paddle.distributed.auto_parallel.static.tuner import tunable_space as ts
 class TestTunableSpace(unittest.TestCase):
diff --git a/test/auto_parallel/test_tunable_variable.py b/test/auto_parallel/test_tunable_variable.py
index 641f7b4347e36f..208ecf7238ffa3 100644
--- a/test/auto_parallel/test_tunable_variable.py
+++ b/test/auto_parallel/test_tunable_variable.py
@@ -14,7 +14,7 @@
 import unittest
-from paddle.distributed.auto_parallel.tuner import tunable_variable as tv
+from paddle.distributed.auto_parallel.static.tuner import tunable_variable as tv
 class TestTunableVariable(unittest.TestCase):
diff --git a/test/auto_parallel/test_while_op_completion.py b/test/auto_parallel/test_while_op_completion.py
index 3f9b5b151ab08e..67887916c66625 100644
--- a/test/auto_parallel/test_while_op_completion.py
+++ b/test/auto_parallel/test_while_op_completion.py
@@ -20,8 +20,10 @@
 import paddle.nn.functional as F
 from paddle import nn, static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
 from paddle.distributed.fleet import auto
 paddle.enable_static()
diff --git a/test/auto_parallel/test_while_op_partition.py b/test/auto_parallel/test_while_op_partition.py
index 00f3a70bbcf42c..ef3189542cb5d0 100644
--- a/test/auto_parallel/test_while_op_partition.py
+++ b/test/auto_parallel/test_while_op_partition.py
@@ -20,12 +20,12 @@
 import paddle.nn.functional as F
 from paddle import fluid, nn, static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
     get_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.utils import make_data_unshard
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.utils import make_data_unshard
 from paddle.distributed.fleet import auto
 paddle.enable_static()
diff --git a/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py b/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py
index aa989df7025793..33672c3fa7f211 100644
--- a/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py
+++ b/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py
@@ -23,10 +23,10 @@
 import paddle
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.static.dist_context import (
     get_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.operators.common import (
+from paddle.distributed.auto_parallel.static.operators.common import (
     is_data_parallel_reduce_op,
 )
 from paddle.distributed.passes import PassContext, new_pass
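The last hunk touches a pass test; the pass registry in `paddle.distributed.passes` is unaffected by this move, so passes are still looked up by name. A hedged usage sketch (the pass name comes from this test's subject, while the attribute keys, `dist_context`, `rank`, and the two programs are assumptions):

```python
from paddle.distributed.passes import PassContext, new_pass

# Assumed attribute keys; the real pass validates its required attrs.
attrs = {"dist_context": dist_context, "global_rank": rank}

# Look up the pass by its registered name and apply it to the programs
# produced by the static auto-parallel pipeline.
dp_opt_pass = new_pass("auto_parallel_data_parallel_optimization", attrs)
dp_opt_pass.apply([main_prog], [startup_prog], PassContext())
```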