Skip to content

graph_classification

GeneralCalculator

Bases: BasicTaskCalculator

Source code in flgo\benchmark\toolkits\graph\graph_classification\__init__.py
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
class GeneralCalculator(BasicTaskCalculator):
    """Task calculator for graph classification.

    Pairs a cross-entropy criterion with a graph DataLoader to compute
    per-batch training losses and dataset-level evaluation metrics.
    """

    def __init__(self, device, optimizer_name='sgd'):
        """
        Args:
            device: the torch device on which tensors/batches are placed
            optimizer_name (str): name of the optimizer (default 'sgd')
        """
        super(GeneralCalculator, self).__init__(device, optimizer_name)
        self.criterion = torch.nn.CrossEntropyLoss()
        self.DataLoader = DataLoader

    def compute_loss(self, model, data):
        """Compute the cross-entropy loss of `model` on one batch.

        Args:
            model: the model to train
            data: a batch of training data carrying a label attribute `y`

        Returns:
            dict: train-one-step's result; at least contains the key 'loss'
        """
        tdata = self.to_device(data)
        outputs = model(tdata)
        loss = self.criterion(outputs, tdata.y)
        return {'loss': loss}

    @torch.no_grad()
    def test(self, model, dataset, batch_size=64, num_workers=0, pin_memory=False):
        """Evaluate `model` on `dataset`.

        Metric = [mean_accuracy, mean_loss]

        Args:
            model: the model to evaluate
            dataset: the dataset to evaluate on
            batch_size (int): evaluation batch size; -1 evaluates in a single batch
            num_workers (int): number of dataloader workers
            pin_memory (bool): whether the dataloader pins host memory

        Returns:
            dict: {'accuracy': mean accuracy, 'loss': mean loss over `dataset`}
        """
        model.eval()
        if batch_size == -1: batch_size = len(dataset)
        # Evaluation only aggregates order-independent metrics, so there is no
        # need to shuffle the data.
        data_loader = self.get_dataloader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
        total_loss = 0.0
        num_correct = 0
        for batch_data in data_loader:
            batch_data = self.to_device(batch_data)
            outputs = model(batch_data)
            # criterion returns the per-batch mean; re-weight by the batch size
            # below so the final average is exact when the last batch is smaller.
            batch_mean_loss = self.criterion(outputs, batch_data.y).item()
            y_pred = outputs.data.max(1, keepdim=True)[1]
            correct = y_pred.eq(batch_data.y.data.view_as(y_pred)).long().cpu().sum()
            num_correct += correct.item()
            total_loss += batch_mean_loss * len(batch_data.y)
        return {'accuracy': 1.0 * num_correct / len(dataset), 'loss': total_loss / len(dataset)}

    def to_device(self, data):
        """Move a batch of data onto the configured device."""
        return data.to(self.device)

    def get_dataloader(self, dataset, batch_size=64, shuffle=True, num_workers=0, pin_memory=False):
        """Create a DataLoader over `dataset`.

        Raises:
            NotImplementedError: if no DataLoader class is bound to this calculator.
        """
        if self.DataLoader is None:
            raise NotImplementedError("DataLoader Not Found.")
        return self.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)

compute_loss(model, data)

model: the model to train

Name Type Description Default
data

the training dataset (a batch of training data)

required — no default value
Source code in flgo\benchmark\toolkits\graph\graph_classification\__init__.py
139
140
141
142
143
144
145
146
147
148
def compute_loss(self, model, data):
    """Compute the training loss of `model` on one batch of data.

    Args:
        model: the model to train
        data: a batch of training data carrying a label attribute `y`

    Returns:
        dict: train-one-step's result; at least contains the key 'loss'
    """
    tdata = self.to_device(data)
    outputs = model(tdata)
    loss = self.criterion(outputs, tdata.y)
    return {'loss': loss}

test(model, dataset, batch_size=64, num_workers=0, pin_memory=False)

Metric = [mean_accuracy, mean_loss]

Parameters:

Name Type Description Default
dataset

the dataset to evaluate on

required — no default value

batch_size

evaluation batch size; -1 evaluates in a single batch

64
Source code in flgo\benchmark\toolkits\graph\graph_classification\__init__.py
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
@torch.no_grad()
def test(self, model, dataset, batch_size=64, num_workers=0, pin_memory=False):
    """Evaluate `model` on `dataset`.

    Metric = [mean_accuracy, mean_loss]

    Args:
        model: the model to evaluate
        dataset: the dataset to evaluate on
        batch_size: evaluation batch size; -1 evaluates in a single batch
        num_workers: number of dataloader workers
        pin_memory: whether the dataloader pins host memory

    Returns:
        dict: {'accuracy': mean accuracy, 'loss': mean loss over `dataset`}
    """
    model.eval()
    if batch_size == -1: batch_size = len(dataset)
    # NOTE(review): no shuffle flag is passed, so the loader's default
    # (shuffle=True) is used even during evaluation; metrics are unaffected.
    data_loader = self.get_dataloader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory)
    total_loss = 0.0
    num_correct = 0
    for batch_id, batch_data in enumerate(data_loader):
        batch_data = self.to_device(batch_data)
        outputs = model(batch_data)
        # criterion returns the per-batch mean; re-weight by batch size below
        # so the final average is exact when the last batch is smaller.
        batch_mean_loss = self.criterion(outputs, batch_data.y).item()
        y_pred = outputs.data.max(1, keepdim=True)[1]
        correct = y_pred.eq(batch_data.y.data.view_as(y_pred)).long().cpu().sum()
        num_correct += correct.item()
        total_loss += batch_mean_loss * len(batch_data.y)
    return {'accuracy': 1.0 * num_correct / len(dataset), 'loss': total_loss / len(dataset)}