
updated cosine annealing option

Dimitri Korsch 6 years ago
parent
commit
693bfad302
5 changed files with 19 additions and 5 deletions
  1. cvfinetune/classifier.py (+5 -0)
  2. cvfinetune/parser.py (+7 -2)
  3. cvfinetune/training/__init__.py (+1 -0)
  4. cvfinetune/training/trainer.py (+6 -2)
  5. examples/basic/main.py (+0 -1)

cvfinetune/classifier.py (+5 -0)

@@ -20,6 +20,11 @@ class Classifier(C):
 
 		return self.model.meta.feature_size
 
+	@property
+	def output_size(self):
+		return self.feat_size
+
+
 class SeparateModelClassifier(Classifier):
 	"""Classifier, that holds two separate models"""
 

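The new output_size property simply aliases the existing feat_size property. A minimal, self-contained sketch of the same pattern (DummyMeta and DummyClassifier are placeholder names for illustration, not part of cvfinetune):

	class DummyMeta:
		feature_size = 2048  # e.g. the feature dimension of a backbone CNN

	class DummyClassifier:
		"""Placeholder mirroring the alias added to cvfinetune's Classifier."""
		def __init__(self):
			self.model_meta = DummyMeta()

		@property
		def feat_size(self):
			return self.model_meta.feature_size

		@property
		def output_size(self):
			# alias for callers that expect an "output_size" attribute
			return self.feat_size

	clf = DummyClassifier()
	assert clf.output_size == clf.feat_size == 2048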
cvfinetune/parser.py (+7 -2)

@@ -4,6 +4,7 @@ import platform
 
 from chainer_addons.training import OptimizerType
 from chainer_addons.models import PrepareType
+from chainer_addons.links import PoolingType
 
 from cvargparse import GPUParser, Arg, ArgFactory
 from cvdatasets.utils import read_info_file
@@ -29,6 +30,9 @@ def default_factory(extra_list=[]):
 			PrepareType.as_arg("prepare_type",
 				help_text="type of image preprocessing"),
 
+			PoolingType.as_arg("pooling",
+				help_text="type of pre-classification pooling"),
+
 			Arg("--load", type=str, help="ignore weights and load already fine-tuned model"),
 
 			Arg("--n_jobs", "-j", type=int, default=0,
@@ -39,8 +43,9 @@ def default_factory(extra_list=[]):
 			OptimizerType.as_arg("optimizer", "opt",
 				help_text="type of the optimizer"),
 
-			Arg("--cosine_schedule", action="store_true",
-				help="enable cosine annealing LR schedule"),
+			Arg("--cosine_schedule", type=int,
+				default=-1,
+				help="enable cosine annealing LR schedule. This parameter sets the number of schedule stages"),
 
 			Arg("--l1_loss", action="store_true",
 				help="(only with \"--only_head\" option!) use L1 Hinge Loss instead of Softmax Cross-Entropy"),

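With this change, --cosine_schedule is no longer a boolean switch but an integer stage count, with the default of -1 meaning "disabled". A minimal argparse sketch of the new behaviour (plain argparse standing in for cvargparse; not the actual parser code):

	import argparse

	parser = argparse.ArgumentParser()
	parser.add_argument("--cosine_schedule", type=int, default=-1,
		help="enable cosine annealing LR schedule. "
		     "This parameter sets the number of schedule stages")

	opts = parser.parse_args(["--cosine_schedule", "3"])
	assert opts.cosine_schedule == 3                      # enabled with 3 stages
	assert parser.parse_args([]).cosine_schedule == -1    # disabled by default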
cvfinetune/training/__init__.py (+1 -0)

@@ -1 +1,2 @@
 from .trainer import Trainer
+from .trainer import default_intervals

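With this re-export, both symbols can be imported directly from the training package:

	from cvfinetune.training import Trainer, default_intervals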
cvfinetune/training/trainer.py (+6 -2)

@@ -4,6 +4,7 @@ from datetime import datetime
 
 import chainer
 from chainer.training import extensions, Trainer as T
+from chainer.training import trigger as trigger_module
 from chainer_addons.training import lr_shift
 from chainer_addons.training.optimizer import OptimizerType
 from chainer_addons.training.extensions import SacredReport
@@ -78,14 +79,17 @@ class Trainer(T):
 
 
 		### LR shift ###
-		if opts.cosine_schedule:
+		if opts.cosine_schedule is not None and opts.cosine_schedule > 0:
 			lr_shift_ext = CosineAnnealingLearningRate(
 				attr="alpha" if is_adam else "lr",
 				lr=opts.learning_rate,
 				target=opts.lr_target,
 				epochs=opts.epochs,
-				offset=lr_offset
+				offset=lr_offset,
+				stages=opts.cosine_schedule
 			)
+			new_epochs = lr_shift_ext._epochs
+			self.stop_trigger = trigger_module.get_trigger((new_epochs, "epoch"))
 			self.extend(lr_shift_ext)
 		else:
 			lr_shift_ext = lr_shift(optimizer,

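Because a staged schedule can lengthen the run beyond opts.epochs, the trainer now overrides its stop trigger with the epoch count computed by the extension. A short sketch of that pattern (FakeAnnealing and its offset + epochs * stages formula are assumptions standing in for CosineAnnealingLearningRate, whose internals are not shown here):

	from chainer.training import trigger as trigger_module

	class FakeAnnealing:
		"""Placeholder for CosineAnnealingLearningRate (internals assumed)."""
		def __init__(self, epochs, stages, offset=0):
			# assumed: each stage consumes the full epoch budget once
			self._epochs = offset + epochs * stages

	ext = FakeAnnealing(epochs=30, stages=3)
	# recompute the stop trigger from the extension, as the diff above does
	stop_trigger = trigger_module.get_trigger((ext._epochs, "epoch"))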
examples/basic/main.py (+0 -1)

@@ -11,7 +11,6 @@ import logging
 
 from chainer.training.updaters import StandardUpdater
 
-from chainer_addons.models.classifier import Classifier
 
 from cvfinetune.finetuner import DefaultFinetuner
 from cvfinetune.training.trainer import Trainer