################################################################################
# Copyright (c) 2021 ContinualAI.                                              #
# Copyrights licensed under the MIT License.                                   #
# See the accompanying LICENSE file for terms.                                 #
#                                                                              #
# Date: 2020-01-25                                                             #
# Author(s): Andrea Cossu                                                      #
# E-mail: contact@continualai.org                                              #
# Website: avalanche.continualai.org                                           #
################################################################################
import datetime
import os
from typing import List, TYPE_CHECKING

import torch

from avalanche.evaluation.metric_results import MetricValue
from avalanche.logging import StrategyLogger

if TYPE_CHECKING:
    from avalanche.training import BaseStrategy


class CSVLogger(StrategyLogger):
    """
    The `CSVLogger` logs accuracy and loss metrics into a CSV file.

    Metrics are logged separately for training and evaluation, in the files
    `training_results.csv` and `eval_results.csv` respectively. This logger
    assumes that the user evaluates on only one experience during training
    (see below for an example of a `train` call).

    Through the `EvaluationPlugin`, the user should monitor at least
    EpochAccuracy/Loss and ExperienceAccuracy/Loss. If monitored, the logger
    will also record Experience Forgetting.

    In order to monitor the performance on the held-out experience associated
    with the current training experience, set `eval_every=1` (or a larger
    value) in the strategy constructor and pass the eval experience to the
    `train` method:

    `for i, exp in enumerate(benchmark.train_stream):`
        `strategy.train(exp, eval_streams=[benchmark.test_stream[i]])`

    When not provided, validation loss and validation accuracy will be logged
    as zero.

    The training file header is composed of:
    training_exp_id, epoch, training_accuracy, val_accuracy,
    training_loss, val_loss.

    The evaluation file header is composed of:
    eval_exp, training_exp, eval_accuracy, eval_loss, forgetting
    """

    def __init__(self, log_folder=None):
        """
        Creates an instance of the `CSVLogger` class.

        :param log_folder: folder in which to create the log files.
            If None, a `csvlogs` folder in the current working directory
            will be used.
        """
        super().__init__()
        self.log_folder = log_folder if log_folder is not None else "csvlogs"
        os.makedirs(self.log_folder, exist_ok=True)

        self.training_file = open(
            os.path.join(self.log_folder, 'training_results.csv'), 'w')
        self.eval_file = open(
            os.path.join(self.log_folder, 'eval_results.csv'), 'w')

        # current training experience id
        self.training_exp_id = None

        # whether we are currently training or evaluating;
        # evaluation within training will not change this flag
        self.in_train_phase = None

        # validation metrics computed during training
        self.val_acc, self.val_loss = 0, 0

        # print csv headers
        print('training_exp', 'epoch', 'training_accuracy', 'val_accuracy',
              'training_loss', 'val_loss', sep=',', file=self.training_file,
              flush=True)
        print('eval_exp', 'training_exp', 'eval_accuracy', 'eval_loss',
              'forgetting', sep=',', file=self.eval_file, flush=True)
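
    # Illustrative `training_results.csv` content under the header printed
    # above (values are made up):
    #
    #   training_exp,epoch,training_accuracy,val_accuracy,training_loss,val_loss
    #   0,0,0.5210,0.4987,1.3021,1.3544
    #   0,1,0.6843,0.6412,0.8932,0.9511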

    def log_metric(self, metric_value: 'MetricValue', callback: str) -> None:
        # Values are written by the `print_*` helpers below, so per-metric
        # logging is a no-op for this logger.
        pass

    def _val_to_str(self, m_val):
        if isinstance(m_val, torch.Tensor):
            # Tensors are printed verbatim on a new line; non-scalar tensors
            # may therefore span multiple CSV lines.
            return '\n' + str(m_val)
        elif isinstance(m_val, float):
            return f'{m_val:.4f}'
        else:
            return str(m_val)

    def print_train_metrics(self, training_exp, epoch, train_acc,
                            val_acc, train_loss, val_loss):
        print(training_exp, epoch, self._val_to_str(train_acc),
              self._val_to_str(val_acc), self._val_to_str(train_loss),
              self._val_to_str(val_loss), sep=',',
              file=self.training_file, flush=True)

    def print_eval_metrics(self, eval_exp, training_exp, eval_acc,
                           eval_loss, forgetting):
        print(eval_exp, training_exp, self._val_to_str(eval_acc),
              self._val_to_str(eval_loss), self._val_to_str(forgetting),
              sep=',', file=self.eval_file, flush=True)
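
    # Illustrative `eval_results.csv` content written by `print_eval_metrics`
    # (values are made up):
    #
    #   eval_exp,training_exp,eval_accuracy,eval_loss,forgetting
    #   0,1,0.8712,0.4105,0.0423
    #   1,1,0.9034,0.3377,0.0000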

    def after_training_epoch(self, strategy: 'BaseStrategy',
                             metric_values: List['MetricValue'], **kwargs):
        super().after_training_epoch(strategy, metric_values, **kwargs)
        train_acc, train_loss = 0, 0
        for val in metric_values:
            if 'train_stream' in val.name:
                if val.name.startswith('Top1_Acc_Epoch'):
                    train_acc = val.value
                elif val.name.startswith('Loss_Epoch'):
                    train_loss = val.value
        self.print_train_metrics(self.training_exp_id, strategy.epoch,
                                 train_acc, self.val_acc, train_loss,
                                 self.val_loss)

    def after_eval_exp(self, strategy: 'BaseStrategy',
                       metric_values: List['MetricValue'], **kwargs):
        super().after_eval_exp(strategy, metric_values, **kwargs)
        acc, loss, forgetting = 0, 0, 0
        for val in metric_values:
            if self.in_train_phase:  # validation within training
                if val.name.startswith('Top1_Acc_Exp'):
                    self.val_acc = val.value
                elif val.name.startswith('Loss_Exp'):
                    self.val_loss = val.value
            else:
                if val.name.startswith('Top1_Acc_Exp'):
                    acc = val.value
                elif val.name.startswith('Loss_Exp'):
                    loss = val.value
                elif val.name.startswith('ExperienceForgetting'):
                    forgetting = val.value

        if not self.in_train_phase:
            self.print_eval_metrics(strategy.experience.current_experience,
                                    self.training_exp_id, acc, loss,
                                    forgetting)
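
    # Metric names follow the Avalanche convention
    # '<metric>/<phase>/<stream>/...', e.g. (illustrative):
    #   'Top1_Acc_Epoch/train_phase/train_stream/Task000'
    #   'Loss_Exp/eval_phase/test_stream/Task000/Exp000'
    # which is why the handlers above match on `startswith` plus a stream
    # substring.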

    def before_training_exp(self, strategy: 'BaseStrategy',
                            metric_values: List['MetricValue'], **kwargs):
        super().before_training_exp(strategy, metric_values, **kwargs)
        self.training_exp_id = strategy.experience.current_experience

    def before_eval(self, strategy: 'BaseStrategy',
                    metric_values: List['MetricValue'], **kwargs):
        """
        Manage the case in which `eval` is first called before `train`.
        """
        if self.in_train_phase is None:
            self.in_train_phase = False

    def before_training(self, strategy: 'BaseStrategy',
                        metric_values: List['MetricValue'], **kwargs):
        self.in_train_phase = True

    def after_training(self, strategy: 'BaseStrategy',
                       metric_values: List['MetricValue'], **kwargs):
        self.in_train_phase = False

    def close(self):
        self.training_file.close()
        self.eval_file.close()
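

# Minimal usage sketch for `CSVLogger` (the benchmark/model/optimizer setup
# is illustrative; `SplitMNIST`, `Naive`, `EvaluationPlugin` and the metric
# helpers are standard Avalanche names):
#
#   from avalanche.benchmarks.classic import SplitMNIST
#   from avalanche.evaluation.metrics import (accuracy_metrics, loss_metrics,
#                                             forgetting_metrics)
#   from avalanche.training.plugins import EvaluationPlugin
#   from avalanche.training.strategies import Naive
#
#   csv_logger = CSVLogger(log_folder='csvlogs')
#   eval_plugin = EvaluationPlugin(
#       accuracy_metrics(epoch=True, experience=True),
#       loss_metrics(epoch=True, experience=True),
#       forgetting_metrics(experience=True),
#       loggers=[csv_logger])
#   strategy = Naive(model, optimizer, criterion,
#                    evaluator=eval_plugin, eval_every=1)
#   benchmark = SplitMNIST(n_experiences=5)
#   for i, exp in enumerate(benchmark.train_stream):
#       strategy.train(exp, eval_streams=[benchmark.test_stream[i]])
#       strategy.eval(benchmark.test_stream)
#   csv_logger.close()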


class GenericCSVLogger(StrategyLogger):
    """
    A more general CSV logger that derives its columns from the metrics
    attached to it, instead of the fixed accuracy/loss schema of `CSVLogger`.
    It writes separate files for per-epoch training metrics and for the
    training, validation and test streams, plus a transfer file.
    """

    def __init__(self, log_folder=None):
        super().__init__()
        self.log_folder = log_folder if log_folder is not None else "csvlogs"
        os.makedirs(self.log_folder, exist_ok=True)

        # Prefix file names with a timestamp so that repeated runs do not
        # overwrite each other.
        datetime_stamp = str(datetime.datetime.now()).replace(
            ' ', '_').replace(':', '-').replace('.', '-')[:-4]
        self.training_epoch_file = open(os.path.join(
            self.log_folder, datetime_stamp + '_training_epochs.csv'), 'w')
        self.test_stream_file = open(os.path.join(
            self.log_folder, datetime_stamp + '_test_stream.csv'), 'w')
        self.validation_stream_file = open(os.path.join(
            self.log_folder, datetime_stamp + '_validation_stream.csv'), 'w')
        self.training_stream_file = open(os.path.join(
            self.log_folder, datetime_stamp + '_training_stream.csv'), 'w')
        self.transfer_file = open(os.path.join(
            self.log_folder, datetime_stamp + '_training_transfer.csv'), 'w')

        # current training experience id
        self.training_exp_id = None

        # whether we are currently training or evaluating;
        # evaluation within training will not change this flag
        self.in_train_phase = None

        # validation metrics computed during training
        self.val_acc, self.val_loss = 0, 0

    def log_metric(self, metric_value: 'MetricValue', callback: str) -> None:
        pass

    def _val_to_str(self, m_val):
        if isinstance(m_val, torch.Tensor):
            return '\n' + str(m_val)
        elif isinstance(m_val, float):
            return f'{m_val:.4f}'
        else:
            return str(m_val)

    def print_vals_to_file(self, val_list, mode, exp, epoch, strategy):
        """Append one row of values to the CSV file selected by `mode`."""
        add_classes = mode in ('test', 'validation', 'train_stream')
        if mode == 'test':
            log_file = self.test_stream_file
        elif mode == 'validation':
            log_file = self.validation_stream_file
        elif mode == 'train_epoch':
            log_file = self.training_epoch_file
        elif mode == 'train_stream':
            log_file = self.training_stream_file
        elif mode == 'transfer':
            log_file = self.transfer_file
            # The transfer header is only known once the whole stream has
            # been evaluated, so it is written after the last training epoch
            # of the first experience.
            if exp == 0 and (epoch + 1) == strategy.train_epochs:
                exp_list = ['Exp ' + str(i) for i in range(len(val_list))]
                header = ['train_exp', 'epoch'] + exp_list
                print(*header, sep=',', file=log_file, flush=True)
        else:
            raise ValueError(f'Unknown logging mode: {mode}')

        val_list = [self._val_to_str(i) for i in val_list]
        if add_classes:
            # Space-separated class ids of the last training experience,
            # wrapped in brackets so they stay in a single CSV column.
            classes_in_last_exp = '[' + ' '.join(
                str(c) for c in self.classes_in_last_exp) + ']'
            log_list = [exp, epoch, classes_in_last_exp] + val_list
        else:
            log_list = [exp, epoch] + val_list
        print(*log_list, sep=',', file=log_file, flush=True)
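
    # Illustrative transfer-file content for a three-experience stream
    # (values are made up): the header is written after the last training
    # epoch of experience 0, then one row per evaluation pass.
    #
    #   train_exp,epoch,Exp 0,Exp 1,Exp 2
    #   0,1,0.9101,0.1022,0.0933
    #   1,1,0.8712,0.8954,0.1104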

    def after_training_epoch(self, strategy: 'BaseStrategy',
                             metric_values: List['MetricValue'], **kwargs):
        super().after_training_epoch(strategy, metric_values, **kwargs)
        # One slot per training metric; metrics not emitted this epoch are
        # left as None and logged verbatim.
        train_vals = [None] * len(self.train_metrics)
        train_exp = False
        for val in metric_values:
            if 'train_stream' in val.name:
                train_exp = True
                val_index = self.train_metrics.index(val.name.split('/')[0])
                train_vals[val_index] = val.value
        if train_exp:
            self.print_vals_to_file(train_vals, mode='train_epoch',
                                    exp=self.training_exp_id,
                                    epoch=strategy.epoch, strategy=strategy)

    def after_eval_exp(self, strategy: 'BaseStrategy',
                       metric_values: List['MetricValue'], **kwargs):
        super().after_eval_exp(strategy, metric_values, **kwargs)
        # Per-experience values are written out once per evaluation pass in
        # `after_eval`, so no per-experience logging happens here.

    def after_eval(self, strategy: 'BaseStrategy',
                   metric_values: List['MetricValue'], **kwargs):
        super().after_eval(strategy, metric_values, **kwargs)
        train_metrics_found = False
        validation_metrics_found = False
        test_metrics_found = False
        train_stream_accs = []
        train_stream_vals = [None] * len(self.eval_metrics)
        test_stream_vals = [None] * len(self.eval_metrics)
        validation_stream_vals = [None] * len(self.eval_metrics)
        # Classwise accuracies are assumed to be monitored on a single
        # stream: values from all streams are accumulated in the same lists.
        classes_acc = []
        seq_classes_acc = []
        for val in metric_values:
            if 'eval_phase' in val.name:  # only consider eval-phase metrics
                if 'Transfer' in val.name:
                    # Transfer accuracies over the training stream form one
                    # row of the transfer file.
                    if 'train_stream' in val.name:
                        train_metrics_found = True
                        train_stream_accs.append(val.value)
                else:
                    if 'train_stream' in val.name:
                        train_metrics_found = True
                        if 'SeqClasswise' in val.name:
                            seq_classes_acc.append(val.value)
                        elif 'Classwise' in val.name:
                            classes_acc.append(val.value)
                        else:
                            val_index = self.eval_metrics.index(
                                val.name.split('/')[0])
                            train_stream_vals[val_index] = val.value

                    if 'test_stream' in val.name:
                        test_metrics_found = True
                        if 'SeqClasswise' in val.name:
                            seq_classes_acc.append(val.value)
                        elif 'Classwise' in val.name:
                            classes_acc.append(val.value)
                        else:
                            val_index = self.eval_metrics.index(
                                val.name.split('/')[0])
                            test_stream_vals[val_index] = val.value

                    if 'validation_stream' in val.name:
                        validation_metrics_found = True
                        if 'SeqClasswise' in val.name:
                            seq_classes_acc.append(val.value)
                        elif 'Classwise' in val.name:
                            classes_acc.append(val.value)
                        else:
                            val_index = self.eval_metrics.index(
                                val.name.split('/')[0])
                            validation_stream_vals[val_index] = val.value

        if test_metrics_found:
            if classes_acc:
                test_stream_vals = test_stream_vals + classes_acc
            if seq_classes_acc:
                test_stream_vals = test_stream_vals + seq_classes_acc
            self.print_vals_to_file(test_stream_vals, mode='test',
                                    exp=self.training_exp_id,
                                    epoch=strategy.epoch, strategy=strategy)
        if validation_metrics_found:
            if classes_acc:
                validation_stream_vals = validation_stream_vals + classes_acc
            if seq_classes_acc:
                validation_stream_vals = (validation_stream_vals
                                          + seq_classes_acc)
            self.print_vals_to_file(validation_stream_vals, mode='validation',
                                    exp=self.training_exp_id,
                                    epoch=strategy.epoch, strategy=strategy)
        if train_metrics_found:
            if train_stream_accs:
                self.print_vals_to_file(train_stream_accs, mode='transfer',
                                        exp=self.training_exp_id,
                                        epoch=strategy.epoch,
                                        strategy=strategy)
            # Only log a train-stream row when every expected metric was
            # produced in this evaluation pass.
            if None not in train_stream_vals:
                if classes_acc:
                    train_stream_vals = train_stream_vals + classes_acc
                if seq_classes_acc:
                    train_stream_vals = train_stream_vals + seq_classes_acc
                self.print_vals_to_file(train_stream_vals,
                                        mode='train_stream',
                                        exp=self.training_exp_id,
                                        epoch=strategy.epoch,
                                        strategy=strategy)

    def before_training_exp(self, strategy: 'BaseStrategy',
                            metric_values: List['MetricValue'], **kwargs):
        super().before_training_exp(strategy, metric_values, **kwargs)
        self.training_exp_id = strategy.experience.current_experience
        strategy.current_train_exp_seen = \
            strategy.experience.current_experience
        self.classes_in_last_exp = \
            strategy.experience.classes_in_this_experience

    def before_eval(self, strategy: 'BaseStrategy',
                    metric_values: List['MetricValue'], **kwargs):
        """
        Manage the case in which `eval` is first called before `train`.
        """
        if self.in_train_phase is None:
            self.in_train_phase = False

    def before_training(self, strategy: 'BaseStrategy',
                        metric_values: List['MetricValue'], **kwargs):
        if strategy.experience is None:
            # First `train` call: build the CSV headers from the monitored
            # metrics. `self.all_metrics` is expected to be set by the caller
            # before training starts, and `metric._mode` relies on a private
            # attribute of the metric objects.
            self.train_metrics = []
            self.eval_metrics = []
            logging_classes_accs = False
            seq_logging_classes_accs = False
            for metric in self.all_metrics:
                if metric._mode == 'train':
                    self.train_metrics.append(str(metric))
                else:
                    if 'SeqClasswise' in str(metric):
                        seq_logging_classes_accs = True
                    elif 'Classwise' in str(metric):
                        logging_classes_accs = True
                    elif 'Transfer' not in str(metric):
                        self.eval_metrics.append(str(metric))
            train_head = ['train_exp', 'epoch'] + self.train_metrics
            eval_head = (['train_exp', 'epoch', 'train exp classes']
                         + self.eval_metrics)
            # One extra column per class when classwise accuracy is monitored.
            if logging_classes_accs:
                eval_head = eval_head + [
                    'Cls' + str(i)
                    for i in range(strategy.model.num_classes)]
            if seq_logging_classes_accs:
                eval_head = eval_head + [
                    'SeqCls' + str(i)
                    for i in range(strategy.model.num_classes)]
            print(*train_head, sep=',', file=self.training_epoch_file,
                  flush=True)
            print(*eval_head, sep=',', file=self.validation_stream_file,
                  flush=True)
            print(*eval_head, sep=',', file=self.test_stream_file, flush=True)
            print(*eval_head, sep=',', file=self.training_stream_file,
                  flush=True)
        self.in_train_phase = True
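
    # Illustrative header row produced above for the stream files, assuming
    # two eval metrics and classwise accuracy over four classes (metric
    # names are made up):
    #
    #   train_exp,epoch,train exp classes,Top1_Acc_Exp,Loss_Exp,Cls0,Cls1,Cls2,Cls3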

    def after_training(self, strategy: 'BaseStrategy',
                       metric_values: List['MetricValue'], **kwargs):
        self.in_train_phase = False

    def close(self):
        self.training_epoch_file.close()
        self.validation_stream_file.close()
        self.test_stream_file.close()
        self.training_stream_file.close()
        self.transfer_file.close()
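

# Minimal usage sketch for `GenericCSVLogger`. Note that `self.all_metrics`
# is read in `before_training` but never assigned in this module, so it must
# be provided by the caller; the wiring below is hypothetical:
#
#   from avalanche.evaluation.metrics import accuracy_metrics, loss_metrics
#   from avalanche.training.plugins import EvaluationPlugin
#
#   metrics = [*accuracy_metrics(epoch=True, experience=True),
#              *loss_metrics(epoch=True, experience=True)]
#   logger = GenericCSVLogger(log_folder='csvlogs')
#   logger.all_metrics = metrics  # hypothetical wiring, see above
#   eval_plugin = EvaluationPlugin(*metrics, loggers=[logger])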