News
08/17/2023 - Communications log fixed in baseline code
We have fixed an issue to reduce the number of bytes sent between the clients and the server. Previously, all the weights of the model's layers were sent; now, only the parameters of the non-frozen layers are transmitted, reducing the total amount of information communicated.
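To illustrate the idea, here is a minimal sketch (the model and layer sizes are hypothetical, not the baseline's) of how freezing a layer changes the number of bytes that would be transmitted:

```python
import torch.nn as nn

# Hypothetical two-layer model, for illustration only.
model = nn.Sequential(nn.Linear(512, 512), nn.Linear(512, 10))

# Freeze the first layer: its parameters receive no gradient updates,
# so they never change during training and need not be communicated.
for p in model[0].parameters():
    p.requires_grad = False

all_bytes = sum(p.numel() * p.element_size() for p in model.parameters())
sent_bytes = sum(p.numel() * p.element_size() for p in model.parameters() if p.requires_grad)
print(f"all layers: {all_bytes} B, non-frozen only: {sent_bytes} B")
```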
We recommend doing a git rebase from the baseline code. However, if you have made many changes and find rebasing difficult, below we list the changes we have made (for each snippet, we show the previous implementation first, followed by the new version, under the file, class, and function headers to make the code easier to locate). Moreover, in the code we have left the previous implementation commented out to facilitate integrating the new changes.
utils_parallel.py
Previous implementation:

```python
def get_parameters_from_model(model):
    return [val.cpu().numpy() for _, val in model.model.state_dict().items()]
```

New implementation:

```python
def get_parameters_from_model(model):
    keyed_parameters = {n: p.requires_grad for n, p in model.model.named_parameters()}
    frozen_parameters = [not keyed_parameters[n] if n in keyed_parameters else False
                         for n, p in model.model.state_dict().items()]
    return [val.cpu().numpy() for val, is_frozen
            in zip(model.model.state_dict().values(), frozen_parameters)
            if not is_frozen]
```
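As a quick sanity check, the following sketch (the `Wrapper` class is a hypothetical stand-in for the baseline's model wrapper, which exposes the network as `model.model`) shows that only the trainable layers' tensors are returned:

```python
import torch.nn as nn

class Wrapper:
    # Hypothetical stand-in for the baseline wrapper exposing .model.
    def __init__(self):
        self.model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
        for p in self.model[0].parameters():
            p.requires_grad = False  # Freeze the first layer.

wrapped = Wrapper()
params = get_parameters_from_model(wrapped)
# The frozen layer's weight and bias are skipped: 2 arrays instead of 4.
assert len(params) == 2
```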
Previous implementation:

```python
from collections import OrderedDict  # Imported at the top of utils_parallel.py.
import torch

def set_parameters_model(model, parameters):
    params_dict = zip(model.model.state_dict().keys(), parameters)
    state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})
    model.model.load_state_dict(state_dict, strict=True)
```

New implementation:

```python
def set_parameters_model(model, parameters):
    keyed_parameters = {n: p.requires_grad for n, p in model.model.named_parameters()}
    frozen_parameters = [not keyed_parameters[n] if n in keyed_parameters else False
                         for n, p in model.model.state_dict().items()]
    i = 0
    params_dict = model.model.state_dict()
    # Update state dict with new params, skipping frozen entries.
    for key, is_frozen in zip(model.model.state_dict().keys(), frozen_parameters):
        if not is_frozen:
            params_dict[key] = torch.Tensor(parameters[i])
            i += 1
    model.model.load_state_dict(params_dict, strict=True)
```
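Taken together, the two functions round-trip: the sender serializes only the non-frozen tensors, and the receiver writes them back in state-dict order while frozen entries keep their local values. A short sketch, reusing the hypothetical `Wrapper` from the snippet above:

```python
import torch

sender, receiver = Wrapper(), Wrapper()
payload = get_parameters_from_model(sender)  # Non-frozen arrays only.
set_parameters_model(receiver, payload)      # Frozen entries stay untouched.

# The receiver's trainable layer now matches the sender's.
assert torch.equal(receiver.model[1].weight, sender.model[1].weight)
```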
train.py
class FlowerClient(fl.client.NumPyClient):

Previous implementation:

```python
def set_parameters(self, model, parameters, config):
    params_dict = zip(model.model.state_dict().keys(), parameters)
    state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})
    log_communication(federated_round=config.current_round, sender=-1, receiver=self.client_id, data=parameters, log_location=self.logger.comms_log_file)
    model.model.load_state_dict(state_dict, strict=True)
```

New implementation:

```python
def set_parameters(self, model, parameters, config):
    set_parameters_model(model, parameters)  # Use the standard set_parameters_model function.
    log_communication(federated_round=config.current_round, sender=-1, receiver=self.client_id, data=parameters, log_location=self.logger.comms_log_file)
```
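We do not show `log_communication`'s internals here, but note that `data=parameters` now only ever carries the non-frozen arrays, so whatever byte accounting the logger performs shrinks accordingly. As a rough illustration (a hypothetical helper, not the baseline's actual implementation) of counting a payload's size:

```python
import numpy as np

def payload_bytes(parameters):
    # Sum the raw buffer sizes of the arrays being transmitted.
    return sum(np.asarray(p).nbytes for p in parameters)
```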
def fl_train(data_loaders, model, optimizer, evaluator, logger, client_id, fl_config):

Previous implementation:

```python
    ...
    upd_weights = [torch.add(agg_upd, w_0).cpu() for agg_upd, w_0 in zip(agg_update, copy.deepcopy(parameters))]  # Send all weights.
    ...
    if config.flower:
        log_communication(federated_round=fl_config.current_round, sender=client_id, receiver=-1, data=upd_weights, log_location=logger.comms_log_file)  # Store model's weights bytes.
```

New implementation:

```python
    ...
    upd_weights = [torch.add(agg_upd, w_0).cpu() for agg_upd, w_0, is_frozen in zip(agg_update, copy.deepcopy(parameters), frozen_parameters) if not is_frozen]  # Send weights of NON-frozen layers.
    ...
    if config.flower:
        log_communication(federated_round=fl_config.current_round, sender=client_id, receiver=-1, data=upd_weights, log_location=logger.comms_log_file)  # Store only communicated weights (sent parameters).
```
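To make the three-way zip explicit: `frozen_parameters` acts as a mask over the aggregated updates, so `upd_weights` ends up with exactly as many tensors as the client transmits. A toy illustration with arbitrary values (assuming `agg_update` and `parameters` are aligned with the mask, as in the baseline):

```python
import copy
import torch

agg_update = [torch.ones(2), torch.ones(2), torch.ones(2)]
parameters = [torch.zeros(2), torch.zeros(2), torch.zeros(2)]
frozen_parameters = [True, False, False]  # First entry is frozen.

upd_weights = [torch.add(agg_upd, w_0).cpu()
               for agg_upd, w_0, is_frozen
               in zip(agg_update, copy.deepcopy(parameters), frozen_parameters)
               if not is_frozen]
assert len(upd_weights) == 2  # Only the non-frozen tensors are kept.
```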