fixed flake-8 errors
Signed-off-by: ParthM-GitHub <[email protected]>
KeertiX authored and ParthM-GitHub committed May 10, 2023
1 parent d9173f1 commit 1fc8d3a
Showing 14 changed files with 146 additions and 100 deletions.
@@ -39,7 +39,7 @@
# Fixing the seed for result repetition: remove below to stop repeatable runs
# ----------------------------------
random_seed = 5495300300540669060
g_device = torch.Generator(device='cuda')
g_device = torch.Generator(device="cuda")
# Uncomment the line below to use g_cpu if not using cuda
# g_device = torch.Generator() # noqa: E800
# NOTE: remove below to stop repeatable runs
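The hunk above is part of the tutorial's reproducibility setup; a minimal sketch of that pattern with the CPU fallback spelled out (the manual_seed call is an assumption about how the tutorial consumes the seed, not shown in this hunk):

import torch

random_seed = 5495300300540669060
if torch.cuda.is_available():
    g_device = torch.Generator(device="cuda")
else:
    g_device = torch.Generator()  # CPU generator when CUDA is unavailable
g_device.manual_seed(random_seed)  # assumed usage; returns the seeded generator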
@@ -601,7 +601,6 @@ def end(self):


if __name__ == "__main__":

argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
"--config_path", help="Absolute path to the flow configuration file"
@@ -615,9 +614,7 @@ def end(self):
args = argparser.parse_args()

if torch.cuda.is_available():
device = torch.device(
"cuda:0"
)
device = torch.device("cuda:0")
else:
device = torch.device("cpu")
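For reference, the device-selection idiom this hunk compacts, shown end to end (the Linear module and tensor are stand-ins, not tutorial code):

import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = torch.nn.Linear(10, 2).to(device)   # any nn.Module moves the same way
x = torch.randn(1, 10, device=device)       # create inputs on the same device
y = model(x)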

@@ -638,8 +635,9 @@ def end(self):
"Guadalajara",
]

def callable_to_initialize_collaborator_private_attributes(index, n_collaborators,
batch_size, train_dataset, test_dataset):
def callable_to_initialize_collaborator_private_attributes(
index, n_collaborators, batch_size, train_dataset, test_dataset
):
train = deepcopy(train_dataset)
test = deepcopy(test_dataset)
train.data = train_dataset.data[index::n_collaborators]
@@ -648,8 +646,12 @@ def callable_to_initialize_collaborator_private_attributes(index, n_collaborator
test.targets = test_dataset.targets[index::n_collaborators]

return {
"train_loader": torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True),
"test_loader": torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True),
"train_loader": torch.utils.data.DataLoader(
train, batch_size=batch_size, shuffle=True
),
"test_loader": torch.utils.data.DataLoader(
test, batch_size=batch_size, shuffle=True
),
}

collaborators = []
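The callable above partitions MNIST with a stride slice; a toy sketch of that sharding, independent of the tutorial's datasets:

import torch

data = torch.arange(10)
n_collaborators = 2
shards = [data[i::n_collaborators] for i in range(n_collaborators)]
# shards[0] -> tensor([0, 2, 4, 6, 8]); shards[1] -> tensor([1, 3, 5, 7, 9])
# Every sample lands in exactly one shard, so collaborator datasets are disjoint.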
@@ -659,14 +661,19 @@ def callable_to_initialize_collaborator_private_attributes(index, n_collaborator
name=collaborator_name,
private_attributes_callable=callable_to_initialize_collaborator_private_attributes,
# Set `num_gpus=0.1` to `num_gpus=0.0` in order to run this tutorial on CPU
num_cpus=0.0, num_gpus=0.1, # Assuming GPU(s) is available in the machine
index=idx, n_collaborators=len(collaborator_names),
batch_size=batch_size_train, train_dataset=mnist_train,
test_dataset=mnist_test
num_cpus=0.0,
num_gpus=0.1, # Assuming GPU(s) is available in the machine
index=idx,
n_collaborators=len(collaborator_names),
batch_size=batch_size_train,
train_dataset=mnist_train,
test_dataset=mnist_test,
)
)

local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend="ray")
local_runtime = LocalRuntime(
aggregator=aggregator, collaborators=collaborators, backend="ray"
)
print(f"Local runtime collaborators = {local_runtime.collaborators}")

top_model_accuracy = 0
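Once the runtime exists, the tutorial assigns it to the flow and runs it. A hedged sketch of that step (FederatedFlow and its constructor arguments are assumptions based on the surrounding tutorial, not shown in this diff):

flflow = FederatedFlow(model, optimizer, rounds=2, checkpoint=True)  # illustrative args
flflow.runtime = local_runtime
flflow.run()

With backend="ray" and num_gpus=0.1, Ray treats the GPU as a fractional resource, so up to ten collaborator actors can share a single device.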
@@ -35,7 +35,7 @@

random_seed = 5495300300540669060

g_device = torch.Generator(device='cuda')
g_device = torch.Generator(device="cuda")
# Uncomment the line below to use g_cpu if not using cuda
# g_device = torch.Generator() # noqa: E800
# NOTE: remove below to stop repeatable runs
@@ -580,7 +580,6 @@ def end(self):


if __name__ == "__main__":

argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
"--config_path", help="Absolute path to the flow configuration file."
@@ -594,9 +593,7 @@ def end(self):
args = argparser.parse_args()

if torch.cuda.is_available():
device = torch.device(
"cuda:0"
)
device = torch.device("cuda:0")
else:
device = torch.device("cpu")

@@ -617,8 +614,9 @@ def end(self):
"Guadalajara",
]

def callable_to_initialize_collaborator_private_attributes(index, n_collaborators,
batch_size, train_dataset, test_dataset):
def callable_to_initialize_collaborator_private_attributes(
index, n_collaborators, batch_size, train_dataset, test_dataset
):
train = deepcopy(train_dataset)
test = deepcopy(test_dataset)
train.data = train_dataset.data[index::n_collaborators]
@@ -627,8 +625,12 @@ def callable_to_initialize_collaborator_private_attributes(index, n_collaborator
test.targets = test_dataset.targets[index::n_collaborators]

return {
"train_loader": torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True),
"test_loader": torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True),
"train_loader": torch.utils.data.DataLoader(
train, batch_size=batch_size, shuffle=True
),
"test_loader": torch.utils.data.DataLoader(
test, batch_size=batch_size, shuffle=True
),
}

collaborators = []
@@ -638,14 +640,19 @@ def callable_to_initialize_collaborator_private_attributes(index, n_collaborator
name=collaborator_name,
private_attributes_callable=callable_to_initialize_collaborator_private_attributes,
# Set `num_gpus=0.1` to `num_gpus=0.0` in order to run this tutorial on CPU
num_cpus=0.0, num_gpus=0.1, # Assuming GPU(s) is available in the machine
index=idx, n_collaborators=len(collaborator_names),
batch_size=batch_size_train, train_dataset=mnist_train,
test_dataset=mnist_test
num_cpus=0.0,
num_gpus=0.1, # Assuming GPU(s) is available in the machine
index=idx,
n_collaborators=len(collaborator_names),
batch_size=batch_size_train,
train_dataset=mnist_train,
test_dataset=mnist_test,
)
)

local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend="ray")
local_runtime = LocalRuntime(
aggregator=aggregator, collaborators=collaborators, backend="ray"
)
print(f"Local runtime collaborators = {local_runtime.collaborators}")

best_model = None
33 changes: 18 additions & 15 deletions openfl-tutorials/experimental/Privacy_Meter/cifar10_PM.py
@@ -146,7 +146,6 @@ def inference(network, test_loader, device):


def optimizer_to_device(optimizer, device):

"""
Send the "torch.optim.Optimizer" object to the specified device
for model training and inference
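The body of optimizer_to_device is outside this hunk; a plausible sketch of what such a helper typically does (an assumption, not the tutorial's exact code):

import torch

def optimizer_to_device(optimizer, device):
    # Move every tensor in the optimizer state (e.g. Adam's moment buffers) to `device`.
    for state in optimizer.state.values():
        for key, value in state.items():
            if torch.is_tensor(value):
                state[key] = value.to(device)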
@@ -581,7 +580,6 @@ def end(self):


if __name__ == "__main__":

argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
"--audit_dataset_ratio",
@@ -663,9 +661,7 @@ def end(self):
collaborator_names = ["Portland", "Seattle"]

if torch.cuda.is_available():
device = torch.device(
"cuda:0"
)
device = torch.device("cuda:0")
else:
device = torch.device("cpu")

@@ -693,9 +689,9 @@ def end(self):
train_dataset.targets = Y[:train_dataset_size]

test_dataset = deepcopy(cifar_test)
test_dataset.data = X[train_dataset_size:train_dataset_size + test_dataset_size]
test_dataset.data = X[train_dataset_size: train_dataset_size + test_dataset_size]
test_dataset.targets = Y[
train_dataset_size:train_dataset_size + test_dataset_size
train_dataset_size: train_dataset_size + test_dataset_size
]

population_dataset = deepcopy(cifar_test)
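The slices above carve one shuffled dataset into disjoint train, test, and population segments; the arithmetic in miniature (sizes are illustrative):

X = list(range(100))
train_size, test_size = 60, 20
train = X[:train_size]
test = X[train_size: train_size + test_size]   # spaced as in the new lines above
population = X[train_size + test_size:]
assert len(train) + len(test) + len(population) == len(X)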
@@ -715,7 +711,8 @@ def end(self):
# this function will be called before executing collaborator steps
# which will return private attributes dictionary for each collaborator
def callable_to_initialize_collaborator_private_attributes(
index, n_collaborators, train_ds, test_ds, population_ds, args):
index, n_collaborators, train_ds, test_ds, population_ds, args
):
# construct the training and test and population dataset
local_train = deepcopy(train_ds)
local_test = deepcopy(test_ds)
@@ -773,24 +770,30 @@ def callable_to_initialize_collaborator_private_attributes(
),
}


collaborators = []
for idx, collab_name in enumerate(collaborator_names):
collaborators.append(
Collaborator(
name=collab_name,
private_attributes_callable=callable_to_initialize_collaborator_private_attributes,
# If 1 GPU is available in the machine
# Set `num_gpus=0.0` to `num_gpus=0.5` to run on GPU with ray backend with 2 collaborators
num_cpus=0.0, num_gpus=0.0,
index=idx, n_collaborators=len(collaborator_names),
train_ds=train_dataset, test_ds=test_dataset,
population_ds=population_dataset, args=args
# Set `num_gpus=0.0` to `num_gpus=0.5` to run on GPU
# with ray backend with 2 collaborators
num_cpus=0.0,
num_gpus=0.0,
index=idx,
n_collaborators=len(collaborator_names),
train_ds=train_dataset,
test_ds=test_dataset,
population_ds=population_dataset,
args=args,
)
)

# Set backend='ray' to use ray-backend
local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend="single_process")
local_runtime = LocalRuntime(
aggregator=aggregator, collaborators=collaborators, backend="single_process"
)

print(f"Local runtime collaborators = {local_runtime.collaborators}")

3 changes: 2 additions & 1 deletion openfl/experimental/interface/fl_spec.py
@@ -84,7 +84,8 @@ def run(self) -> None:
"\nLocalRuntime(...,backend='single_process')\n"
"\n or for more information about the original error,"
"\nPlease see the official Ray documentation"
"\nhttps://docs.ray.io/en/releases-2.2.0/ray-core/objects/serialization.html"
"\nhttps://docs.ray.io/en/releases-2.2.0/ray-core/\
objects/serialization.html"
)
raise SerializationError(str(e) + msg)
else:
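One note on the wrapped URL: a backslash continuation inside a string literal is only safe if the continuation line starts at column zero, since any indentation becomes part of the string. Implicit concatenation avoids the pitfall entirely; a sketch of the same message built that way (an alternative, not what this commit does):

msg = (
    "\nPlease see the official Ray documentation"
    "\nhttps://docs.ray.io/en/releases-2.2.0/"
    "ray-core/objects/serialization.html"
)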
17 changes: 8 additions & 9 deletions openfl/experimental/interface/participants.py
@@ -45,20 +45,20 @@ class Collaborator(Participant):
def __init__(self, name: str = "", private_attributes_callable: Callable = None,
num_cpus: int = 0, num_gpus: int = 0.0, **kwargs):
"""
Create collaborator object with custom resources and a callable
function to assign private attributes
Parameters:
name (str): Name of the collaborator. default=""
private_attributes_callable (Callable): A function which returns collaborator
private attributes for each collaborator. In case private_attributes are not
required this can be omitted. default=None
num_cpus (int): Specifies how many cores to use for the collaborator step execution.
This will only be used if backend is set to ray. default=0
num_gpus (float): Specifies how many GPUs to use to accelerate the collaborator
step execution. This will only be used if backend is set to ray. default=0
kwargs (dict): Parameters required to call private_attributes_callable function.
@@ -84,15 +84,15 @@ def get_name(self) -> str:

def initialize_private_attributes(self) -> None:
"""
Initialize private attributes of Collaborator object by invoking
the callable specified by the user
"""
if self.private_attributes_callable is not None:
self.private_attributes = self.private_attributes_callable(**self.kwargs)

def __set_collaborator_attrs_to_clone(self, clone: Any) -> None:
"""
Set collaborator private attributes to FLSpec clone before transitioning
Set collaborator private attributes to FLSpec clone before transitioning
from Aggregator step to collaborator steps
"""
# set collaborator private attributes as
@@ -102,10 +102,10 @@ def __set_collaborator_attrs_to_clone(self, clone: Any) -> None:

def __delete_collab_attrs_from_clone(self, clone: Any) -> None:
"""
Remove collaborator private attributes from FLSpec clone before
transitioning from Collaborator step to Aggregator step
"""
# Update collaborator private attributes by taking latest
# parameters from clone, then delete attributes from clone.
for attr_name in self.private_attributes:
if hasattr(clone, attr_name):
Expand Down Expand Up @@ -146,9 +146,8 @@ def __init__(self, name: str = "", private_attributes_callable: Callable = None,

def initialize_private_attributes(self) -> None:
"""
Initialize private attributes of Aggregator object by invoking
the callable specified by the user
"""
if self.private_attributes_callable is not None:
self.private_attributes = self.private_attributes_callable(**self.kwargs)
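Pulling the docstrings together, a minimal sketch of the private-attributes contract (the callable and its keyword arguments are hypothetical; Collaborator and initialize_private_attributes come from the file shown above):

from openfl.experimental.interface import Collaborator

def make_private_attrs(index, total):
    # Returns the private attributes dictionary for one collaborator.
    return {"shard_id": index, "n_shards": total}

collab = Collaborator(
    name="Portland",
    private_attributes_callable=make_private_attrs,
    index=0,
    total=2,  # extra kwargs are forwarded to the callable, per the docstring
)
collab.initialize_private_attributes()
# collab.private_attributes is now {"shard_id": 0, "n_shards": 2}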
