
added basic example
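
A rough sketch of how the example is meant to be run (hard-coded paths such as /home/korsch/Data and the conda environment "chainer4" in config.sh are specific to the local setup and need to be adapted):

    cd examples/basic/scripts
    # train.sh sets its defaults, sources config.sh and then launches ../main.py;
    # most settings (GPU, MODEL_TYPE, DATASET, EPOCHS, ...) can be overridden via
    # environment variables, and any extra CLI flags are forwarded to main.py
    GPU=0 MODEL_TYPE=inception DATASET=CUB200 EPOCHS=30 bash train.sh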

Dimitri Korsch, 6 years ago (commit 569121ec52)

+ 48 - 0
examples/basic/main.py

@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+if __name__ != '__main__': raise Exception("Do not import me!")
+
+import socket
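+# on machines other than "sigma25" (presumably the only host with a display), fall back to the non-interactive Agg backend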
+if socket.gethostname() != "sigma25":
+	import matplotlib
+	matplotlib.use('Agg')
+
+import chainer
+import logging
+
+from chainer.training.updaters import StandardUpdater
+
+from finetune.finetuner import DefaultFinetuner
+from finetune.training.trainer import Trainer
+from finetune.dataset import BaseDataset
+from finetune.classifier import Classifier
+
+
+from utils import parser
+
+def main(args):
+	if args.debug:
+		chainer.set_debug(args.debug)
+		logging.warning("DEBUG MODE ENABLED!")
+
+
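+	# set up the finetuner, which wires the dataset, model, classifier and updater classes together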
+	tuner = DefaultFinetuner(
+		args,
+		classifier_cls=Classifier,
+		classifier_kwargs={},
+		model_kwargs=dict(
+			pooling=args.pooling,
+		),
+
+		dataset_cls=BaseDataset,
+
+		updater_cls=StandardUpdater,
+		updater_kwargs={},
+	)
+
+
+	tuner.run(trainer_cls=Trainer, opts=args)
+
+
+main(parser.parse_args())

+ 75 - 0
examples/basic/scripts/config.sh

@@ -0,0 +1,75 @@
+source /home/korsch/.anaconda3/etc/profile.d/conda.sh
+conda activate chainer4
+
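+# select the Python launcher via environment flags:
+# GDB=1 runs under gdb, MPI=1 uses mpirun with N_MPI ranks and HOSTFILE, PROFILE=1 enables cProfile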
+if [[ $GDB == "1" ]]; then
+	PYTHON="gdb -ex run --args python"
+
+elif [[ $MPI == "1" ]]; then
+	N_MPI=${N_MPI:-2}
+	HOSTFILE=${HOSTFILE:-hosts.conf}
+	PYTHON="mpirun -n $N_MPI --hostfile ${HOSTFILE} -x PATH -x CUDA_PATH python"
+	OPTS="${OPTS} --mpi"
+
+elif [[ $PROFILE == "1" ]]; then
+	PYTHON="python -m cProfile -o profile"
+
+else
+	PYTHON="python"
+
+fi
+
+RUN_SCRIPT="../main.py"
+
+###### Model and dataset config ######
+
+BASE_DIR=/home/korsch/Data
+
+OPTIMIZER=${OPTIMIZER:-adam}
+MODEL_TYPE=${MODEL_TYPE:-resnet}
+PREPARE_TYPE=${PREPARE_TYPE:-model}
+
+MODEL_DIR=${BASE_DIR}/MODELS/${MODEL_TYPE}
+
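+# DATASET (and optionally OUTPUT_SUFFIX) is expected to be set by the calling script (e.g. train.sh) before config.sh is sourced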
+OUTPUT_DIR=${OUTPUT_DIR:-../.results/ft_${DATASET}/${OPTIMIZER}${OUTPUT_SUFFIX}}
+
+
+###### Training config ######
+
+BATCH_SIZE=${BATCH_SIZE:-24}
+GPU=${GPU:-"0"}
+
+EPOCHS=${EPOCHS:-100}
+
+DECAY=${DECAY:-5e-4}
+LR=${LR:-"-lr 1e-3 -lrd 1e-1 -lrt 1e-8 -lrs 20"}
+
+FINAL_POOLING=${FINAL_POOLING:-g_avg}
+
+N_JOBS=${N_JOBS:-1}
+
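+# cap the OpenMP threads per process when worker processes are used for data loading (presumably to avoid CPU oversubscription)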
+if [[ $N_JOBS != "0" ]]; then
+	export OMP_NUM_THREADS=2
+fi
+
+###### OPTIONS ######
+
+OPTS="${OPTS} --epochs ${EPOCHS}"
+OPTS="${OPTS} --gpu ${GPU}"
+OPTS="${OPTS} --batch_size ${BATCH_SIZE}"
+
+# if [[ -f sacred/creds.sh ]]; then
+# 	source sacred/creds.sh
+# else
+# 	echo "No sacred credentials found! Disabling sacred."
+# 	OPTS="${OPTS} --no_sacred"
+# fi
+
+OPTS="${OPTS} --augment"
+OPTS="${OPTS} --model_type ${MODEL_TYPE}"
+OPTS="${OPTS} --prepare_type ${PREPARE_TYPE}"
+OPTS="${OPTS} --n_jobs ${N_JOBS}"
+OPTS="${OPTS} --optimizer ${OPTIMIZER}"
+OPTS="${OPTS} --output ${OUTPUT_DIR}"
+OPTS="${OPTS} --pooling ${FINAL_POOLING}"
+OPTS="${OPTS} --decay ${DECAY}"
+OPTS="${OPTS} ${LR}"

+ 35 - 0
examples/basic/scripts/train.sh

@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# possible MODEL_TYPE values: resnet, inception, inception_tf, [vgg]
+MODEL_TYPE=${MODEL_TYPE:-inception}
+DATA=${DATA:-/home/korsch/Data/info.yml}
+
+
+GPU=${GPU:-0}
+N_JOBS=${N_JOBS:-3}
+
+OPTIMIZER=${OPTIMIZER:-rmsprop}
+LR=${LR:-"-lr 1e-4 -lrd 0.1 -lrt 1e-6 -lrs 20"}
+DECAY=${DECAY:-5e-4}
+EPOCHS=${EPOCHS:-60}
+BATCH_SIZE=${BATCH_SIZE:-32}
+
+export OMP_NUM_THREADS=2
+
+DATASET=${DATASET:-CUB200}
+# possible PARTS values besides GLOBAL: NAC, GT, GT2, L1_pred, L1_full
+PARTS=${PARTS:-GLOBAL}
+
+source config.sh
+
+
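+# 299x299 is the input size expected by Inception-style models; 0.1 is the label smoothing value from the common Inception training recipe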
+OPTS="${OPTS} --label_smoothing 0.1"
+OPTS="${OPTS} --input_size 299"
+
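+# positional arguments for main.py: the dataset info file, the dataset name, and (presumably) a <DATASET>_<PARTS> annotation identifier;
+# any additional CLI flags are forwarded via "$@"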
+$PYTHON $RUN_SCRIPT \
+	${DATA} \
+	${DATASET} \
+	${DATASET}_${PARTS} \
+	${OPTS} \
+	"$@"
+

+ 0 - 0
examples/basic/utils/__init__.py


+ 48 - 0
examples/basic/utils/parser.py

@@ -0,0 +1,48 @@
+from cvargparse import GPUParser, Arg
+from chainer_addons.links import PoolingType
+
+from finetune.parser import default_factory
+
+
+def parse_args():
+
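+	# default_factory presumably supplies the common fine-tuning arguments; only a pooling-type option is added on top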
+	parser = GPUParser(default_factory([
+
+			PoolingType.as_arg("pooling",
+				help_text="type of pre-classification pooling"),
+
+			# Arg("--triplet_loss", action="store_true",
+			# 	help="Use triplet loss"),
+
+
+			# Arg("--normalize", action="store_true",
+			# 	help="normalize features after cbil- or alpha-poolings"),
+
+			# Arg("--subset", "-s", type=int, nargs="*", default=[-1], help="select specific classes"),
+			# Arg("--no_sacred", action="store_true", help="do save outputs to sacred"),
+
+			# Arg("--use_parts", action="store_true",
+			# 	help="use parts, if present"),
+			# Arg("--simple_parts", action="store_true",
+			# 	help="use simple parts classifier, that only concatenates the features"),
+			# Arg("--no_global", action="store_true",
+			# 	help="use parts only, no global feature"),
+
+
+			# Arg("--parts_in_bb", action="store_true", help="take only uniform regions where the centers are inside the bounding box"),
+
+			# Arg("--rnd_select", action="store_true", help="hide random uniform regions of the image"),
+			# Arg("--recurrent", action="store_true", help="observe all parts in recurrent manner instead of the whole image at once"),
+
+			# ## AlphaPooling options
+			# Arg("--init_alpha", type=int, default=1, help="initial parameter for alpha pooling"),
+			# Arg("--kappa", type=float, default=1., help="Learning rate factor for alpha pooling"),
+			# Arg("--switch_epochs", type=int, default=0, help="train alpha pooling layer and the rest of the network alternating")
+		])
+	)
+
+	parser.init_logger()
+
+	return parser.parse_args()