Skip to content

FedML-AI Docstrings Update #1279

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 77 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
77 commits
Select commit Hold shift + click to select a range
752a456
Update data_loader.py
rajveer43 Sep 1, 2023
5cbbafd
Merge branch 'FedML-AI:master' into docs-patch-1
rajveer43 Sep 6, 2023
f7a560e
code and docstring update
rajveer43 Sep 6, 2023
19e377d
Merge branch 'docs-patch-1' of https://github.com/rajveer43/FedML int…
rajveer43 Sep 6, 2023
f5c94b8
addition
rajveer43 Sep 6, 2023
012fd9c
`fedml\simulation\sp\3 folder` update
rajveer43 Sep 6, 2023
8ab163c
same as previous
rajveer43 Sep 6, 2023
e554592
same
rajveer43 Sep 6, 2023
e56c105
Merge branch 'FedML-AI:master' into docs-patch-1
rajveer43 Sep 7, 2023
01381ee
`python\fedml\utils\ `update
rajveer43 Sep 7, 2023
f6065fe
update
rajveer43 Sep 7, 2023
6d6b355
`python\fedml\simulation\nccl`
rajveer43 Sep 7, 2023
696f2d2
`python\fedml\simulation\mpi
rajveer43 Sep 7, 2023
c4472b1
python\fedml\simulation\mpi\async_fedavg
rajveer43 Sep 7, 2023
e7162ef
Merge branch 'FedML-AI:master' into docs-patch-1
rajveer43 Sep 8, 2023
93231e3
python\fedml\simulation\mpi
rajveer43 Sep 8, 2023
161b875
python\fedml\simulation\mpi
rajveer43 Sep 8, 2023
5c44b01
python\fedml\simulation\mpi
rajveer43 Sep 8, 2023
d93b711
Merge branch 'FedML-AI:master' into docs-patch-1
rajveer43 Sep 8, 2023
b399769
python\fedml\simulation\mpi\
rajveer43 Sep 9, 2023
6356bfb
g
rajveer43 Sep 9, 2023
73a4289
gg
rajveer43 Sep 9, 2023
6ae730d
uodtae
rajveer43 Sep 9, 2023
3e80913
docs
rajveer43 Sep 9, 2023
d817a67
j
rajveer43 Sep 9, 2023
e09341c
qdd ds
rajveer43 Sep 10, 2023
a3bc61a
model done
rajveer43 Sep 13, 2023
0884292
23
rajveer43 Sep 13, 2023
7200df3
Merge branch 'FedML-AI:master' into docs-patch-1
rajveer43 Sep 15, 2023
d8ae561
n
rajveer43 Sep 15, 2023
d6686ba
ed
rajveer43 Sep 16, 2023
4057c0f
add
rajveer43 Sep 18, 2023
1fcfc56
fg
rajveer43 Sep 18, 2023
fc72f1e
update
rajveer43 Sep 19, 2023
832356c
add
rajveer43 Sep 20, 2023
1335977
add
rajveer43 Sep 21, 2023
19b64e5
push
rajveer43 Sep 22, 2023
50a4b9b
add docstrings
rajveer43 Sep 23, 2023
c80d0d8
thread
rajveer43 Sep 24, 2023
fe18ff0
Update mqtt_manager.py
rajveer43 Sep 26, 2023
541703f
Update mqtt_manager.py
rajveer43 Sep 27, 2023
803f21e
Update data_loader.py
rajveer43 Sep 1, 2023
f1ce786
code and docstring update
rajveer43 Sep 6, 2023
a958d9e
additon
rajveer43 Sep 6, 2023
b85e43e
`fedml\simulation\sp\3 folder` update
rajveer43 Sep 6, 2023
84784ff
same as previous
rajveer43 Sep 6, 2023
3881512
same
rajveer43 Sep 6, 2023
092d3a2
`python\fedml\utils\ `update
rajveer43 Sep 7, 2023
d100a0b
udpate
rajveer43 Sep 7, 2023
d4471bf
`python\fedml\simulation\nccl`
rajveer43 Sep 7, 2023
14c75cb
`python\fedml\simulation\mpi
rajveer43 Sep 7, 2023
a8d0246
python\fedml\simulation\mpi\async_fedavg
rajveer43 Sep 7, 2023
c755ec9
python\fedml\simulation\mpi
rajveer43 Sep 8, 2023
0a6b8d5
python\fedml\simulation\mpi
rajveer43 Sep 8, 2023
0a96491
python\fedml\simulation\mpi
rajveer43 Sep 8, 2023
f93ca88
python\fedml\simulation\mpi\
rajveer43 Sep 9, 2023
75483ba
g
rajveer43 Sep 9, 2023
bcb67fe
gg
rajveer43 Sep 9, 2023
988b48f
uodtae
rajveer43 Sep 9, 2023
4f00cd9
docs
rajveer43 Sep 9, 2023
3f23f67
j
rajveer43 Sep 9, 2023
b2f820a
qdd ds
rajveer43 Sep 10, 2023
1714f22
model done
rajveer43 Sep 13, 2023
10c251c
23
rajveer43 Sep 13, 2023
1e67bd2
n
rajveer43 Sep 15, 2023
c12a139
ed
rajveer43 Sep 16, 2023
843b0e8
add
rajveer43 Sep 18, 2023
f670535
fg
rajveer43 Sep 18, 2023
ac91c95
update
rajveer43 Sep 19, 2023
96e3118
add
rajveer43 Sep 20, 2023
9cd3e32
add
rajveer43 Sep 21, 2023
e4b35ed
push
rajveer43 Sep 22, 2023
048d7a9
add docstrins
rajveer43 Sep 23, 2023
1e9c0a0
thread
rajveer43 Sep 24, 2023
feaaab9
Update mqtt_manager.py
rajveer43 Sep 26, 2023
b96cc67
Update mqtt_manager.py
rajveer43 Sep 27, 2023
fad8537
thread
rajveer43 Sep 29, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
45 changes: 44 additions & 1 deletion python/examples/launch/serve_mnist/model/minist_model.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,54 @@
import torch
class LogisticRegression(torch.nn.Module):
    """
    Logistic regression model: a single linear layer followed by a sigmoid.

    Args:
        input_dim (int): Dimensionality of the input features.
        output_dim (int): Number of outputs; use 1 for binary classification.

    Example:
        model = LogisticRegression(input_dim=10, output_dim=1)
        probs = model(torch.randn(4, 10))  # values in the range [0, 1]

    Note:
        The sigmoid squashes the linear output element-wise, so every entry
        of the returned tensor lies in [0, 1].
    """

    def __init__(self, input_dim, output_dim):
        super(LogisticRegression, self).__init__()
        self.linear = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        """
        Forward pass of the logistic regression model.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, input_dim).

        Returns:
            torch.Tensor: Sigmoid-activated output of shape
            (batch_size, output_dim) with values in [0, 1].
        """
        # Fix: removed a stray `import torch` that preceded this docstring.
        # It shadowed the module-level import and, because it was the first
        # statement, demoted the docstring to a dead string expression.
        outputs = torch.sigmoid(self.linear(x))
        return outputs

224 changes: 169 additions & 55 deletions python/fedml/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,56 @@
)
from .core.common.ml_engine_backend import MLEngineBackend

from fedml import device
from fedml import data
from fedml import model
from fedml import mlops

from .arguments import load_arguments

from .launch_simulation import run_simulation

from .launch_cross_silo_horizontal import run_cross_silo_server
from .launch_cross_silo_horizontal import run_cross_silo_client

from .launch_cross_silo_hi import run_hierarchical_cross_silo_server
from .launch_cross_silo_hi import run_hierarchical_cross_silo_client

from .launch_cheeath import run_cheetah_server
from .launch_cheeath import run_cheetah_client

from .launch_serving import run_model_serving_client
from .launch_serving import run_model_serving_server

from .launch_cross_device import run_mnn_server

from .core.common.ml_engine_backend import MLEngineBackend

from .runner import FedMLRunner

from fedml import api

__all__ = [
"MLEngineBackend",
"device",
"data",
"model",
"mlops",
"FedMLRunner",
"run_simulation",
"run_cross_silo_server",
"run_cross_silo_client",
"run_hierarchical_cross_silo_server",
"run_hierarchical_cross_silo_client",
"run_cheetah_server",
"run_cheetah_client",
"run_model_serving_client",
"run_model_serving_server",
"run_mnn_server",
"api"
]


_global_training_type = None
_global_comm_backend = None

Expand All @@ -32,7 +82,17 @@ def init(args=None, check_env=True, should_init_logs=True):
if args is None:
args = load_arguments(fedml._global_training_type, fedml._global_comm_backend)

"""Initialize FedML Engine."""
"""
Initialize the FedML Engine.

Args:
args (argparse.Namespace, optional): Command-line arguments. Defaults to None.
check_env (bool, optional): Whether to check the environment. Defaults to True.
should_init_logs (bool, optional): Whether to initialize logs. Defaults to True.

Returns:
argparse.Namespace: Updated command-line arguments.
"""
if check_env:
collect_env(args)

Expand Down Expand Up @@ -120,6 +180,12 @@ def init(args=None, check_env=True, should_init_logs=True):


def print_args(args):
"""
Print the arguments to the log, excluding sensitive paths.

Args:
args (argparse.Namespace): Command-line arguments.
"""
mqtt_config_path = None
s3_config_path = None
args_copy = args
Expand All @@ -138,7 +204,9 @@ def print_args(args):

def update_client_specific_args(args):
"""
data_silo_config is used for reading specific configuration for each client
Update client-specific arguments based on data_silo_config.

data_silo_config is used for reading specific configuration for each client
Example: In fedml_config.yaml, we have the following configuration
client_specific_args:
data_silo_config:
Expand All @@ -149,6 +217,9 @@ def update_client_specific_args(args):
fedml_config/data_silo_4_config.yaml,
]
data_silo_1_config.yaml contains some client client speicifc arguments.

Args:
args (argparse.Namespace): Command-line arguments.
"""
if (
hasattr(args, "data_silo_config")
Expand All @@ -166,7 +237,17 @@ def update_client_specific_args(args):


def init_simulation_mpi(args):

from mpi4py import MPI
"""
Initialize MPI-based simulation.

Args:
args (argparse.Namespace): Command-line arguments.

Returns:
argparse.Namespace: Updated command-line arguments.
"""

comm = MPI.COMM_WORLD
process_id = comm.Get_rank()
Expand All @@ -183,14 +264,35 @@ def init_simulation_mpi(args):


def init_simulation_sp(args):
    """
    Set up the single-process (standalone) simulation backend.

    This mode needs no additional configuration, so the arguments object is
    handed back to the caller untouched.

    Args:
        args (argparse.Namespace): Command-line arguments.

    Returns:
        argparse.Namespace: The same arguments object, unchanged.
    """
    return args


def init_simulation_nccl(args):
    """
    Set up the NCCL-based simulation backend.

    Currently a no-op placeholder: no configuration is performed and nothing
    is returned (implicitly ``None``), unlike the SP/MPI initializers which
    return the arguments object.

    Args:
        args (argparse.Namespace): Command-line arguments (unused).
    """
    return


def manage_profiling_args(args):
"""
Manage profiling-related arguments and configurations.

Args:
args (argparse.Namespace): Command-line arguments.
"""
if not hasattr(args, "sys_perf_profiling"):
args.sys_perf_profiling = True
if not hasattr(args, "sys_perf_profiling"):
Expand Down Expand Up @@ -236,6 +338,12 @@ def manage_profiling_args(args):


def manage_cuda_rpc_args(args):
"""
Manage CUDA RPC-related arguments and configurations.

Args:
args (argparse.Namespace): Command-line arguments.
"""

if (not hasattr(args, "enable_cuda_rpc")) or (not args.using_gpu):
args.enable_cuda_rpc = False
Expand Down Expand Up @@ -264,6 +372,12 @@ def manage_cuda_rpc_args(args):


def manage_mpi_args(args):
"""
Manage MPI-related arguments and configurations.

Args:
args (argparse.Namespace): Command-line arguments.
"""
if hasattr(args, "backend") and args.backend == "MPI":
from mpi4py import MPI

Expand All @@ -282,6 +396,15 @@ def manage_mpi_args(args):
args.comm = None

def init_cross_silo_horizontal(args):
"""
Initialize the cross-silo training for the horizontal scenario.

Args:
args (argparse.Namespace): Command-line arguments.

Returns:
args (argparse.Namespace): Updated command-line arguments.
"""
args.n_proc_in_silo = 1
args.proc_rank_in_silo = 0
manage_mpi_args(args)
Expand All @@ -291,6 +414,15 @@ def init_cross_silo_horizontal(args):


def init_cross_silo_hierarchical(args):
"""
Initialize the cross-silo training for the hierarchical scenario.

Args:
args (argparse.Namespace): Command-line arguments.

Returns:
args (argparse.Namespace): Updated command-line arguments.
"""
manage_mpi_args(args)
manage_cuda_rpc_args(args)

Expand Down Expand Up @@ -344,6 +476,15 @@ def init_cross_silo_hierarchical(args):


def init_cheetah(args):
"""
Initialize the CheetaH training scenario.

Args:
args (argparse.Namespace): Command-line arguments.

Returns:
args (argparse.Namespace): Updated command-line arguments.
"""
args.n_proc_in_silo = 1
args.proc_rank_in_silo = 0
manage_mpi_args(args)
Expand All @@ -353,6 +494,15 @@ def init_cheetah(args):


def init_model_serving(args):
"""
Initialize the model serving scenario.

Args:
args (argparse.Namespace): Command-line arguments.

Returns:
args (argparse.Namespace): Updated command-line arguments.
"""
args.n_proc_in_silo = 1
args.proc_rank_in_silo = 0
manage_cuda_rpc_args(args)
Expand All @@ -361,10 +511,12 @@ def init_model_serving(args):


def update_client_id_list(args):

"""
generate args.client_id_list for CLI mode where args.client_id_list is set to None
In MLOps mode, args.client_id_list will be set to real-time client id list selected by UI (not starting from 1)
Generate args.client_id_list for the CLI mode where args.client_id_list is set to None.
In MLOps mode, args.client_id_list will be set to a real-time client id list selected by the UI (not starting from 1).

Args:
args (argparse.Namespace): Command-line arguments.
"""
if not hasattr(args, "using_mlops") or (hasattr(args, "using_mlops") and not args.using_mlops):
if not hasattr(args, "client_id_list") or args.client_id_list is None or args.client_id_list == "None" or args.client_id_list == "[]":
Expand Down Expand Up @@ -396,60 +548,22 @@ def update_client_id_list(args):


def init_cross_device(args):
    """
    Set up the cross-device training scenario.

    Only the server side runs inside the Python package, so the caller is
    unconditionally assigned rank 0 and the ``"server"`` role.

    Args:
        args (argparse.Namespace): Command-line arguments; mutated in place.

    Returns:
        argparse.Namespace: The same arguments object, updated with
        ``rank`` and ``role``.
    """
    # The Python package only ever hosts the server process in this mode.
    args.rank = 0
    args.role = "server"
    return args


def run_distributed():
    """Placeholder for launching distributed training; intentionally a no-op."""


from fedml import device
from fedml import data
from fedml import model
from fedml import mlops

from .arguments import load_arguments

from .launch_simulation import run_simulation

from .launch_cross_silo_horizontal import run_cross_silo_server
from .launch_cross_silo_horizontal import run_cross_silo_client

from .launch_cross_silo_hi import run_hierarchical_cross_silo_server
from .launch_cross_silo_hi import run_hierarchical_cross_silo_client

from .launch_cheeath import run_cheetah_server
from .launch_cheeath import run_cheetah_client

from .launch_serving import run_model_serving_client
from .launch_serving import run_model_serving_server

from .launch_cross_device import run_mnn_server

from .core.common.ml_engine_backend import MLEngineBackend

from .runner import FedMLRunner

from fedml import api

__all__ = [
"MLEngineBackend",
"device",
"data",
"model",
"mlops",
"FedMLRunner",
"run_simulation",
"run_cross_silo_server",
"run_cross_silo_client",
"run_hierarchical_cross_silo_server",
"run_hierarchical_cross_silo_client",
"run_cheetah_server",
"run_cheetah_client",
"run_model_serving_client",
"run_model_serving_server",
"run_mnn_server",
"api"
]
Loading