from cvargparse import BaseParser, Arg
from nabirds.annotations import AnnotationType


def parse_args():
	parser = BaseParser([
		Arg("data",
			help="Folder containing the dataset with images and annotation files or dataset info file",
			type=str),

		AnnotationType.as_arg("dataset",
			help_text="Type of the annotation"),

		Arg("--parts", "-p",
			choices=["GT", "GT2", "NAC", "UNI", "L1_pred", "L1_full"]),

		Arg("--feature_model", "-fm",
			choices=["inception", "inception_tf", "resnet"]),

		Arg("--subset", "-sub",
			help="Possible subsets: train, test",
			choices=["train", "test"],
			default="train", type=str),

		Arg("--start", "-s",
			help="Image id to start with",
			type=int, default=0),

		Arg("--n_images", "-n",
			help="Number of images to display",
			type=int, default=10),

		Arg("--rnd",
			help="Select random subset of present parts",
			action="store_true"),

		Arg("--crop_to_bb",
			help="Crop image to the bounding box",
			action="store_true"),

		Arg("--crop_uniform",
			help="Try to extend the bounding box to same height and width",
			action="store_true"),

		Arg("--parts_in_bb",
			help="Only display parts that are inside the bounding box",
			action="store_true"),

		Arg("--features",
			help="Pre-extracted train and test features",
			default=[None, None], nargs=2, type=str),

		Arg("--ratio",
			help="Part extraction ratio",
			type=float, default=.2),

		Arg("--rescale_size",
			help="Rescales the part positions from this size to the original image size",
			type=int, default=-1),

		Arg("--uniform_parts", "-u",
			help="Do not use GT parts, but sample parts uniformly from the image",
			action="store_true"),

		Arg("--seed",
			help="Random seed",
			type=int, default=12311123),
	])

	parser.init_logger()

	return parser.parse_args()
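

# --- Usage sketch (illustrative, not part of the original script) ---
# A minimal example of how the parsed arguments might be consumed.
# The `main` function below is a hypothetical consumer; only the
# attribute names (args.data, args.subset, ...) follow from the parser
# definition above, and logging is assumed to have been configured by
# parser.init_logger().
import logging


def main(args):
	logging.info("Dataset folder / info file: %s (annotation type: %s)",
		args.data, args.dataset)
	logging.info("Subset: %s, starting at image id %d, displaying %d images",
		args.subset, args.start, args.n_images)

	if args.uniform_parts:
		logging.info("Sampling parts uniformly instead of using GT parts")


if __name__ == "__main__":
	main(parse_args())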