
1.6 Logger Customization

This section introduces the usage of Logger. A Logger is responsible for recording the runtime variables of interest, which can vary greatly depending on personal usage. We offer three key APIs to support different experimental purposes.

import flgo.experiment.logger as fel
class Logger(fel.BasicLogger):
    def initialize(self):
        # initialize necessary variables BEFORE training starts
        pass

    def log_once(self):
        # will be carried out every K communication rounds DURING the training process
        pass

    def organize_output(self):
        # organize output AFTER training ends
        pass

The three APIs are respectively responsible for customized operations before/during/after training. All variables of interest should be recorded into self.output, which will finally be saved as a .json file. self.output is of type collections.defaultdict, and the default value of each key is an empty list. The following example shows how to customize a Logger.
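To make the recording convention concrete, here is a minimal, framework-free sketch of how such a defaultdict accumulates per-round values and is then serialized to JSON; it uses plain Python only and does not touch any FLGo internals.

import collections, json

output = collections.defaultdict(list)      # same container type as self.output
output['test_loss'].append(0.9)             # appended at round 1 by log_once
output['test_loss'].append(0.7)             # appended at round 2 by log_once
print(json.dumps(output))                   # {"test_loss": [0.9, 0.7]}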

1.6.1 Example: Customization on Logger

import collections
import numpy as np
import copy

class MyLogger(fel.BasicLogger):
    def initialize(self, *args, **kwargs):
        # keep a copy of the best model seen so far and its test loss
        self.optimal_model = copy.deepcopy(self.coordinator.model)
        self.optimal_test_loss = float('inf')

    def log_once(self):
        # evaluation on testing data
        test_metric = self.coordinator.test()
        for met_name, met_val in test_metric.items():
            self.output['test_' + met_name].append(met_val)
        # check whether the current model is the best so far
        if test_metric['loss'] < self.optimal_test_loss:
            self.optimal_test_loss = test_metric['loss']
            self.optimal_model.load_state_dict(self.coordinator.model.state_dict())
        self.show_current_output()

    def organize_output(self):
        super().organize_output()
        # evaluation on clients' validation datasets
        all_metrics = collections.defaultdict(list)
        for c in self.participants:
            client_metrics = c.test(self.optimal_model, 'val')
            for met_name, met_val in client_metrics.items():
                all_metrics[met_name].append(met_val)
        for met_name, met_val in all_metrics.items():
            self.output[met_name] = met_val
        # compute the average loss of the best/worst 30% clients on their validation datasets
        met_name = 'loss'
        all_valid_losses = sorted(all_metrics[met_name])
        k1 = int(0.3*len(self.participants))
        k2 = int(0.7*len(self.participants))
        self.output['worst_30_valid_loss'] = 1.0*sum(all_valid_losses[k2:])/len(all_valid_losses[k2:])
        self.output['best_30_valid_loss'] = 1.0*sum(all_valid_losses[:k1])/k1

import flgo
import flgo.algorithm.fedavg as fedavg
import flgo.algorithm.qfedavg as qfedavg
task = './test_synthetic' # this task has been generated in Sec.1.3.1

# running optimization; 'algo_para':0.1 sets q=0.1 for qfedavg (see the record header 'qfedavg_q0.1' below)
op = {'num_rounds':30, 'num_epochs':1, 'batch_size':8, 'learning_rate':0.1, 'proportion':1.0, 'gpu':0, 'algo_para':0.1}
fedavg_runner = flgo.init(task, fedavg, option = op, Logger=MyLogger)
qffl_runner = flgo.init(task, qfedavg, option=op, Logger=MyLogger)
fedavg_runner.run()
qffl_runner.run()

# Result analysis
import flgo.experiment.analyzer as al
records = al.Selector({'task':task, 'header':['fedavg', 'qfedavg_q0.1',], 'filter':{'R':30, 'E':1, 'B':8, 'LR':0.1,'P':1.0}}).records[task]
for rec in records:
    wl = rec.data['worst_30_valid_loss']
    bl = rec.data['best_30_valid_loss']
    print('{}:(Worst is {}, Best is {})'.format(rec.data['option']['algorithm'], wl, bl))
fedavg:(Worst is 1.5370861026975844, Best is 0.15324175854523978)
qfedavg:(Worst is 1.5319330559836493, Best is 0.4078656468126509)

The results show that qfedavg achieves better performance for the worst 30% of clients, but sacrifices model performance for the best 30% of clients.
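As a small follow-up, the per-client validation metrics written by organize_output can also be read back from the records for further comparison. The sketch below assumes that the clients' test() returns a 'loss' entry, so that rec.data['loss'] holds the list of per-client validation losses stored by MyLogger above.

import numpy as np
for rec in records:
    client_losses = rec.data['loss']   # per-client validation losses written by MyLogger
    print('{}: mean valid loss is {:.4f}, std is {:.4f}'.format(
        rec.data['option']['algorithm'], np.mean(client_losses), np.std(client_losses)))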