
refactored default parser arguments into groups

Dimitri Korsch 6 years ago
Parent
Commit
6f877c7f28

+ 1 - 1
cvfinetune/__init__.py

@@ -1 +1 @@
-__version__ = "0.3.2"
+__version__ = "0.4.0"

+ 0 - 132
cvfinetune/parser.py

@@ -1,132 +0,0 @@
-import os
-import logging
-import platform
-import warnings
-
-from chainer_addons.training import OptimizerType
-from chainer_addons.models import PrepareType
-from chainer_addons.links import PoolingType
-
-from cvargparse import GPUParser, Arg, ArgFactory
-from cvdatasets.utils import read_info_file
-
-DEFAULT_INFO_FILE = os.environ.get("DATA")
-
-if DEFAULT_INFO_FILE is not None and os.path.isfile(DEFAULT_INFO_FILE):
-	info_file = read_info_file(DEFAULT_INFO_FILE)
-else:
-	info_file = None
-
-WARNING = """Could not find default info file \"{}\". """ + \
-"""Some arguments (dataset, parts etc.) are not restraint to certain choices! """ + \
-"""You can set <DATA> environment variable to change the default info file location."""
-
-def default_factory(extra_list=[]):
-	if info_file is None:
-		warnings.warn(WARNING.format(DEFAULT_INFO_FILE))
-		arg_list0 = [
-			Arg("data"),
-			Arg("dataset"),
-			Arg("parts"),
-
-			Arg("--model_type", "-mt",
-				default="resnet",
-				help="type of the model"),
-		]
-	else:
-		arg_list0 = [
-			Arg("data", default=DEFAULT_INFO_FILE),
-			Arg("dataset", choices=info_file.DATASETS.keys()),
-			Arg("parts", choices=info_file.PARTS.keys()),
-			Arg("--model_type", "-mt",
-				default="resnet", choices=info_file.MODELS.keys(),
-				help="type of the model"),
-		]
-
-	arg_list1 = [
-		Arg("--input_size", type=int, nargs="+", default=0,
-			help="overrides default input size of the model, if greater than 0"),
-
-		PrepareType.as_arg("prepare_type",
-			help_text="type of image preprocessing"),
-
-		PoolingType.as_arg("pooling",
-			help_text="type of pre-classification pooling"),
-
-		Arg("--load", type=str, help="ignore weights and load already fine-tuned model (classifier will NOT be re-initialized and number of classes will be unchanged)"),
-		Arg("--weights", type=str, help="ignore default weights and load already pre-trained model (classifier will be re-initialized and number of classes will be changed)"),
-		Arg("--headless", action="store_true", help="ignores classifier layer during loading"),
-
-		Arg("--n_jobs", "-j", type=int, default=0,
-			help="number of loading processes. If 0, then images are loaded in the same process"),
-
-		Arg("--warm_up", type=int, help="warm up epochs"),
-
-		OptimizerType.as_arg("optimizer", "opt",
-			help_text="type of the optimizer"),
-
-		Arg("--cosine_schedule", type=int,
-			default=-1,
-			help="enable cosine annealing LR schedule. This parameter sets the number of schedule stages"),
-
-		Arg("--l1_loss", action="store_true",
-			help="(only with \"--only_head\" option!) use L1 Hinge Loss instead of Softmax Cross-Entropy"),
-
-		Arg("--from_scratch", action="store_true",
-			help="Do not load any weights. Train the model from scratch"),
-
-		Arg("--label_shift", type=int, default=1,
-			help="label shift"),
-
-		Arg("--swap_channels", action="store_true",
-			help="preprocessing option: swap channels from RGB to BGR"),
-
-		Arg("--label_smoothing", type=float, default=0,
-			help="Factor for label smoothing"),
-
-		Arg("--no_center_crop_on_val", action="store_true",
-			help="do not center crop imaages in the validation step!"),
-
-		Arg("--only_head", action="store_true", help="fine-tune only last layer"),
-		Arg("--no_progress", action="store_true", help="dont show progress bar"),
-		Arg("--augment", action="store_true", help="do data augmentation (random croping and random hor. flipping)"),
-		Arg("--force_load", action="store_true", help="force loading from caffe model"),
-		Arg("--only_eval", action="store_true", help="evaluate the model only. do not train!"),
-		Arg("--init_eval", action="store_true", help="evaluate the model before training"),
-		Arg("--no_snapshot", action="store_true", help="do not save trained model"),
-
-		Arg("--output", "-o", type=str, default=".out", help="output folder"),
-	]
-
-	return ArgFactory(extra_list + arg_list0 + arg_list1)\
-		.seed()\
-		.batch_size()\
-		.epochs()\
-		.debug()\
-		.learning_rate(lr=1e-2, lrs=10, lrt=1e-5, lrd=1e-1)\
-		.weight_decay(default=5e-4)
-
-
-class FineTuneParser(GPUParser):
-	def init_logger(self, simple=False, logfile=None):
-		if not self.has_logging: return
-		fmt = '{levelname:s} - [{asctime:s}] {filename:s}:{lineno:d} [{funcName:s}]: {message:s}'
-
-		handler0 = logging.StreamHandler()
-		handler0.addFilter(HostnameFilter())
-		handler0.setFormatter(logging.Formatter("<{hostname:^10s}>: " + fmt, style="{"))
-
-		filename = logfile if logfile is not None else f"{platform.node()}.log"
-		handler1 = logging.FileHandler(filename=filename, mode="w")
-		handler1.setFormatter(logging.Formatter(fmt, style="{"))
-
-		logger = logging.getLogger()
-		logger.addHandler(handler0)
-		logger.addHandler(handler1)
-		logger.setLevel(getattr(logging, self.args.loglevel.upper(), logging.DEBUG))
-
-class HostnameFilter(logging.Filter):
-
-	def filter(self, record):
-		record.hostname = platform.node()
-		return True

+ 8 - 0
cvfinetune/parser/__init__.py

@@ -0,0 +1,8 @@
+from cvfinetune.parser.base import FineTuneParser, default_factory
+
+
+__all__ = [
+	"FineTuneParser",
+	"default_factory"
+]
+
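
With cvfinetune/parser.py replaced by a package, the old import path still resolves; a minimal check (nothing beyond the two re-exported names is assumed):

	from cvfinetune.parser import FineTuneParser, default_factory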

+ 48 - 0
cvfinetune/parser/base.py

@@ -0,0 +1,48 @@
+import os
+import logging
+import platform
+
+
+from cvargparse import GPUParser, Arg, ArgFactory
+
+from cvfinetune.parser.dataset_args import add_dataset_args
+from cvfinetune.parser.model_args import add_model_args
+from cvfinetune.parser.training_args import add_training_args
+
+
+def default_factory(extra_list=[]):
+	return ArgFactory(extra_list)
+
+
+class FineTuneParser(GPUParser):
+	def init_logger(self, simple=False, logfile=None):
+		if not self.has_logging: return
+		fmt = '{levelname:s} - [{asctime:s}] {filename:s}:{lineno:d} [{funcName:s}]: {message:s}'
+
+		handler0 = logging.StreamHandler()
+		handler0.addFilter(HostnameFilter())
+		handler0.setFormatter(logging.Formatter("<{hostname:^10s}>: " + fmt, style="{"))
+
+		filename = logfile if logfile is not None else f"{platform.node()}.log"
+		handler1 = logging.FileHandler(filename=filename, mode="w")
+		handler1.setFormatter(logging.Formatter(fmt, style="{"))
+
+		logger = logging.getLogger()
+		logger.addHandler(handler0)
+		logger.addHandler(handler1)
+		logger.setLevel(getattr(logging, self.args.loglevel.upper(), logging.DEBUG))
+
+
+	def __init__(self, *args, **kwargs):
+		super(FineTuneParser, self).__init__(*args, **kwargs)
+
+		add_dataset_args(self)
+		add_model_args(self)
+		add_training_args(self)
+
+
+class HostnameFilter(logging.Filter):
+
+	def filter(self, record):
+		record.hostname = platform.node()
+		return True
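
FineTuneParser now assembles the three argument groups in its constructor, so callers no longer need the monolithic default_factory. A rough usage sketch, assuming GPUParser follows the standard argparse parse_args() flow (not shown in this diff):

	from cvfinetune.parser import FineTuneParser

	parser = FineTuneParser()   # dataset, model and training groups are added in __init__
	args = parser.parse_args()  # assumption: inherited argparse-style parsing
	parser.init_logger()        # hostname-tagged stream handler plus a file handler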

+ 46 - 0
cvfinetune/parser/dataset_args.py

@@ -0,0 +1,46 @@
+import abc
+
+from cvargparse import Arg
+from cvfinetune.parser.utils import DEFAULT_INFO_FILE
+from cvfinetune.parser.utils import get_info_file
+from cvfinetune.parser.utils import parser_extender
+
+@parser_extender
+def add_dataset_args(parser):
+
+	info_file = get_info_file()
+
+	if info_file is None:
+		_args = [
+			Arg("data"),
+			Arg("dataset"),
+			Arg("parts")]
+	else:
+		_args = [
+			Arg("data", default=DEFAULT_INFO_FILE),
+			Arg("dataset", choices=info_file.DATASETS.keys()),
+			Arg("parts", choices=info_file.PARTS.keys()),
+		]
+
+	_args.extend([
+
+		Arg("--label_shift", type=int, default=1,
+			help="label shift"),
+
+		Arg("--swap_channels", action="store_true",
+			help="preprocessing option: swap channels from RGB to BGR"),
+
+	])
+
+	parser.add_args(_args, group_name="Dataset arguments")
+
+class DatasetParserMixin(abc.ABC):
+	def __init__(self, *args, **kwargs):
+		super(DatasetParserMixin, self).__init__(*args, **kwargs)
+		add_dataset_args(self)
+
+
+__all__ = [
+	"DatasetParserMixin",
+	"add_dataset_args"
+]
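
Each group ships in two forms: a function (add_dataset_args) for extending an existing parser and a mixin (DatasetParserMixin) for inheritance. Because parser_extender returns the parser, the functional form can also be chained; a sketch, assuming GPUParser is constructible without arguments:

	from cvargparse import GPUParser
	from cvfinetune.parser.dataset_args import add_dataset_args

	# adds the "Dataset arguments" group and returns the same parser
	parser = add_dataset_args(GPUParser())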

+ 59 - 0
cvfinetune/parser/model_args.py

@@ -0,0 +1,59 @@
+import abc
+
+from cvargparse import Arg
+from cvfinetune.parser.utils import get_info_file
+from cvfinetune.parser.utils import parser_extender
+
+from chainer_addons.links import PoolingType
+from chainer_addons.models import PrepareType
+
+@parser_extender
+def add_model_args(parser):
+
+	info_file = get_info_file()
+
+	if info_file is None:
+		model_type_choices = None
+	else:
+		model_type_choices = info_file.MODELS.keys()
+
+	_args = [
+		Arg("--model_type", "-mt",
+			default="resnet", choices=model_type_choices,
+			help="type of the model"),
+
+		Arg("--input_size", type=int, nargs="+", default=0,
+			help="overrides default input size of the model, if greater than 0"),
+
+		PrepareType.as_arg("prepare_type",
+			help_text="type of image preprocessing"),
+
+		PoolingType.as_arg("pooling",
+			help_text="type of pre-classification pooling"),
+
+		Arg("--load", type=str,
+			help="ignore weights and load already fine-tuned model (classifier will NOT be re-initialized and number of classes will be unchanged)"),
+
+		Arg("--weights", type=str,
+			help="ignore default weights and load already pre-trained model (classifier will be re-initialized and number of classes will be changed)"),
+
+		Arg("--headless", action="store_true",
+			help="ignores classifier layer during loading"),
+
+		Arg("--force_load", action="store_true",
+			help="force loading from caffe model"),
+	]
+
+	parser.add_args(_args, group_name="Model arguments")
+
+
+class ModelParserMixin(abc.ABC):
+	def __init__(self, *args, **kwargs):
+		super(ModelParserMixin, self).__init__(*args, **kwargs)
+		add_model_args(self)
+
+
+__all__ = [
+	"ModelParserMixin",
+	"add_model_args"
+]
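
Since every mixin forwards to super().__init__, the mixins compose via cooperative multiple inheritance; e.g. a parser with only the dataset and model groups:

	from cvargparse import GPUParser
	from cvfinetune.parser.dataset_args import DatasetParserMixin
	from cvfinetune.parser.model_args import ModelParserMixin

	class EvalParser(DatasetParserMixin, ModelParserMixin, GPUParser):
		pass  # the cooperative __init__ chain adds both groups on top of GPUParser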

+ 75 - 0
cvfinetune/parser/training_args.py

@@ -0,0 +1,75 @@
+import abc
+
+from cvargparse import Arg
+from cvargparse import ArgFactory
+from cvfinetune.parser.utils import parser_extender
+
+from chainer_addons.training import OptimizerType
+
+@parser_extender
+def add_training_args(parser):
+
+	_args = ArgFactory([
+
+		Arg("--n_jobs", "-j", type=int, default=0,
+			help="number of loading processes. If 0, then images are loaded in the same process"),
+
+		Arg("--warm_up", type=int, help="warm up epochs"),
+
+		OptimizerType.as_arg("optimizer", "opt",
+			help_text="type of the optimizer"),
+
+		Arg("--cosine_schedule", type=int,
+			default=-1,
+			help="enable cosine annealing LR schedule. This parameter sets the number of schedule stages"),
+
+		Arg("--l1_loss", action="store_true",
+			help="(only with \"--only_head\" option!) use L1 Hinge Loss instead of Softmax Cross-Entropy"),
+
+		Arg("--from_scratch", action="store_true",
+			help="Do not load any weights. Train the model from scratch"),
+
+		Arg("--label_smoothing", type=float, default=0,
+			help="Factor for label smoothing"),
+
+		Arg("--no_center_crop_on_val", action="store_true",
+			help="do not center crop images in the validation step!"),
+
+		Arg("--only_head", action="store_true", help="fine-tune only last layer"),
+		Arg("--augment", action="store_true", help="do data augmentation (random cropping and random horizontal flipping)"),
+
+	])\
+	.seed()\
+	.batch_size()\
+	.epochs()\
+	.debug()\
+	.learning_rate(lr=1e-2, lrs=10, lrt=1e-5, lrd=1e-1)\
+	.weight_decay(default=5e-4)
+
+	parser.add_args(_args, group_name="Training arguments")
+
+	_args = [
+		Arg("--only_eval", action="store_true", help="evaluate the model only. do not train!"),
+		Arg("--init_eval", action="store_true", help="evaluate the model before training"),
+	]
+
+	parser.add_args(_args, group_name="Evaluation arguments")
+
+	_args = [
+		Arg("--no_progress", action="store_true", help="don't show progress bar"),
+		Arg("--no_snapshot", action="store_true", help="do not save trained model"),
+		Arg("--output", "-o", type=str, default=".out", help="output folder"),
+	]
+	parser.add_args(_args, group_name="Output arguments")
+
+
+class TrainingParserMixin(abc.ABC):
+	def __init__(self, *args, **kwargs):
+		super(TrainingParserMixin, self).__init__(*args, **kwargs)
+		add_training_args(self)
+
+
+__all__ = [
+	"TrainingParserMixin",
+	"add_training_args"
+]
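
The chained ArgFactory helpers (seed, batch_size, epochs, debug, learning_rate, weight_decay) carry over unchanged from the old default_factory; only the grouping is new. The group_name keyword presumably maps onto argparse argument groups, i.e. roughly:

	import argparse

	parser = argparse.ArgumentParser()
	group = parser.add_argument_group("Training arguments")
	group.add_argument("--n_jobs", "-j", type=int, default=0)
	# assumption: cvargparse's add_args(args, group_name=...) wraps this pattern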

+ 34 - 0
cvfinetune/parser/utils.py

@@ -0,0 +1,34 @@
+import os
+import warnings
+
+from cvargparse import BaseParser
+from cvdatasets.utils import read_info_file
+from functools import wraps
+
+
+WARNING = """Could not find default info file \"{}\". """ + \
+"""Some arguments (dataset, parts etc.) are not restraint to certain choices! """ + \
+"""You can set <DATA> environment variable to change the default info file location."""
+
+DEFAULT_INFO_FILE = os.environ.get("DATA")
+def get_info_file():
+
+	if DEFAULT_INFO_FILE is not None and os.path.isfile(DEFAULT_INFO_FILE):
+		return read_info_file(DEFAULT_INFO_FILE)
+	else:
+		warnings.warn(WARNING.format(DEFAULT_INFO_FILE))
+		return None
+
+
+def parser_extender(extender):
+
+	@wraps(extender)
+	def inner(parser):
+		assert isinstance(parser, BaseParser), \
+			"Parser should be a BaseParser instance!"
+
+		extender(parser)
+
+		return parser
+
+	return inner
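
parser_extender is a small decorator: it asserts the argument is a BaseParser, runs the wrapped extender, and returns the parser so calls can be chained. Defining a further group elsewhere follows the same recipe (the flag below is a made-up example):

	from cvargparse import Arg
	from cvfinetune.parser.utils import parser_extender

	@parser_extender
	def add_my_args(parser):
		parser.add_args([
			Arg("--my_flag", action="store_true",
				help="hypothetical example flag"),
		], group_name="My arguments")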

+ 1 - 1
requirements.txt

@@ -11,6 +11,6 @@ chainer>=4.2.0,<7.0
 cupy-cuda100>=4.2.0,<7.0
 
 # my own packages
-cvargparse~=0.1
+cvargparse~=0.2
 cvdatasets~=0.4
 chainer_addons~=0.6
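
The cvargparse bump to ~=0.2 is presumably what provides the add_args(..., group_name=...) API used by the new parser modules.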