Skip to content

classification

GeneralCalculator

Bases: BasicTaskCalculator

Calculator for classification datasets that follow the torchvision.datasets-style (input, label) sample interface.

Parameters:

Name Type Description Default
device torch.device

device

required
optimizer_name str

the name of the optimizer

'sgd'
Source code in flgo\benchmark\toolkits\nlp\classification\__init__.py
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
class GeneralCalculator(BasicTaskCalculator):
    r"""
    Calculator for classification datasets whose samples are (text, label)
    pairs in the style of torchvision.datasets.
    NOTE(review): the original docstring referenced torchvision, but this
    module lives under nlp/classification — presumably inherited; confirm.

    Args:
        device (torch.device): the device on which computation runs
        optimizer_name (str): the name of the optimizer
    """
    def __init__(self, device, optimizer_name='sgd'):
        super(GeneralCalculator, self).__init__(device, optimizer_name)
        # Standard criterion for multi-class classification; expects raw logits.
        self.criterion = torch.nn.CrossEntropyLoss()
        self.DataLoader = torch.utils.data.DataLoader

    def compute_loss(self, model, data):
        """
        Run one forward pass and compute the training loss on a batch.

        Args:
            model: the model to train
            data: one (text, label) batch of the training dataset
        Returns:
            dict: train-one-step's result, which at least contains the key 'loss'
        """
        text, label = self.to_device(data)
        outputs = model(text)
        loss = self.criterion(outputs, label)
        return {'loss': loss}

    @torch.no_grad()
    def test(self, model, dataset, batch_size=64, num_workers=0, pin_memory=False):
        """
        Evaluate the model on the dataset.

        Metric = [mean_accuracy, mean_loss]

        Args:
            model: the model to evaluate
            dataset: the evaluation dataset
            batch_size (int): batch size; -1 means evaluate in a single full batch
            num_workers (int): number of DataLoader workers
            pin_memory (bool): whether the DataLoader pins memory
        Returns:
            dict: {'accuracy': mean_accuracy, 'loss': mean_loss}
        """
        model.eval()
        if batch_size == -1: batch_size = len(dataset)
        data_loader = self.get_dataloader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory)
        total_loss = 0.0
        num_correct = 0
        for batch_data in data_loader:
            batch_data = self.to_device(batch_data)
            outputs = model(batch_data[0])
            # criterion returns the batch-mean loss; re-weight by the batch size
            # so the dataset-level mean is exact even when the last batch is smaller.
            batch_mean_loss = self.criterion(outputs, batch_data[1]).item()
            num_correct += (outputs.argmax(1) == batch_data[1]).sum().item()
            total_loss += batch_mean_loss * len(batch_data[0])
        return {'accuracy': 1.0 * num_correct / len(dataset), 'loss': total_loss / len(dataset)}

    def to_device(self, data):
        """
        Move every element of a batch to ``self.device``.

        Args:
            data: a sequence whose elements are torch.Tensor or lists of torch.Tensor
        Returns:
            tuple: the same elements, resident on ``self.device``
        Raises:
            TypeError: if an element is neither a torch.Tensor nor a list
        """
        res = []
        for item in data:
            if isinstance(item, torch.Tensor):
                res.append(item.to(self.device))
            elif isinstance(item, list):
                res.append([d.to(self.device) for d in item])
            else:
                raise TypeError('data should be either of type list or torch.Tensor')
        return tuple(res)

    def get_dataloader(self, dataset, batch_size=64, shuffle=True, num_workers=0, pin_memory=False, drop_last=False):
        """
        Build a DataLoader over the dataset, collating with ``self.collect_fn``.

        Raises:
            NotImplementedError: if no DataLoader class is bound to this calculator
        """
        # Identity comparison: self.DataLoader is a class object, not a value.
        if self.DataLoader is None:
            raise NotImplementedError("DataLoader Not Found.")
        return self.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last, collate_fn=self.collect_fn)

compute_loss(model, data)

Parameters:

Name Type Description Default
model

the model to train

required
data

the training dataset

required
Source code in flgo\benchmark\toolkits\nlp\classification\__init__.py
 97
 98
 99
100
101
102
103
104
105
106
107
def compute_loss(self, model, data):
    """
    Run one forward pass and return the training loss for a batch.

    Args:
        model: the model to train
        data: the training dataset
    Returns: dict of train-one-step's result, which should at least contains the key 'loss'
    """
    inputs, targets = self.to_device(data)
    predictions = model(inputs)
    return {'loss': self.criterion(predictions, targets)}

test(model, dataset, batch_size=64, num_workers=0, pin_memory=False)

Metric = [mean_accuracy, mean_loss]

Parameters:

Name Type Description Default
model required
dataset required
batch_size 64
Source code in flgo\benchmark\toolkits\nlp\classification\__init__.py
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
@torch.no_grad()
def test(self, model, dataset, batch_size=64, num_workers=0, pin_memory=False):
    """
    Metric = [mean_accuracy, mean_loss]

    Args:
        model:
        dataset:
        batch_size:
    Returns: [mean_accuracy, mean_loss]
    """
    model.eval()
    if batch_size==-1:batch_size=len(dataset)
    data_loader = self.get_dataloader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory)
    total_loss = 0.0
    num_correct = 0
    for batch_id, batch_data in enumerate(data_loader):
        batch_data = self.to_device(batch_data)
        outputs = model(batch_data[0])
        batch_mean_loss = self.criterion(outputs, batch_data[1]).item()
        # y_pred = outputs.data.max(1, keepdim=True)[1]
        # correct = y_pred.eq(batch_data[-1].data.view_as(y_pred)).long().cpu().sum()
        num_correct += (outputs.argmax(1)==batch_data[1]).sum().item()
        total_loss += batch_mean_loss * len(batch_data[0])
    return {'accuracy': 1.0*num_correct/len(dataset), 'loss':total_loss/len(dataset)}