Clemens-Alexander Brust 3 years ago
parent commit ea50f6f187

+ 0 - 11
LICENSE

@@ -1,11 +0,0 @@
-Copyright 2021 Clemens-Alexander Brust
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 2 - 2
README.md

@@ -1,3 +1,3 @@
-# chillax
+# CHILLAX - Concept Hierarchies for Imprecise Label Learning and Annotation eXtrapolation
 
-CHILLAX - Concept Hierarchies for Imprecise Label Learning and Annotation eXtrapolation
+CHILLAX is now part of [CHIA](https://github.com/cvjena/chia).

+ 0 - 3
chillax/__init__.py

@@ -1,3 +0,0 @@
-from chillax.version import __version__
-
-__all__ = ["__version__"]

+ 0 - 54
chillax/experiment_selfsupervised.py

@@ -1,54 +0,0 @@
-from chia import containers, instrumentation
-from chia import helpers
-from chillax import methods
-
-import config as pcfg
-import argparse
-
-
-def main(config_files):
-    # Set up buffered observer
-    buffered_observer = instrumentation.ObserverFactory.create({"name": "buffered"})
-
-    # Set some important environment variables and validate the GPU configuration
-    helpers.setup_environment([buffered_observer])
-
-    configs = [
-        pcfg.config_from_json(config_file, read_from_file=True)
-        for config_file in config_files
-    ] + [helpers.get_user_config()]
-    config = pcfg.ConfigurationSet(*configs)
-
-    # Need to register our fancy new methods
-    methods.update_chia_factories()
-
-    obs = instrumentation.NamedObservable("Experiment")
-
-    experiment_container = containers.ExperimentContainer(config, outer_observable=obs)
-
-    # Replay the buffer
-    buffered_observer.replay_messages(obs)
-
-    with experiment_container.exception_shroud:
-        obs.log_info("Hello!")
-
-        # Now, build the extrapolator
-        if "extrapolator" in config.keys(levels=1):
-            extrapolator = methods.CHILLAXExtrapolatorFactory.create(
-                config["extrapolator"],
-                knowledge_base=experiment_container.knowledge_base,
-                observers=experiment_container.observers,
-            )
-            experiment_container.classifier.extrapolator = extrapolator
-
-        experiment_container.runner.run()
-
-    # Make sure all the data is saved
-    obs.send_shutdown(successful=True)
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(prog="chillax.experiment_selfsupervised")
-    parser.add_argument("config_file", type=str, nargs="+")
-    args = parser.parse_args()
-    main(config_files=args.config_file)
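
For reference, the removed entry point above merges one or more JSON configuration files with the user configuration, registers the CHILLAX factories with CHIA, optionally attaches an extrapolator to the classifier, and runs the experiment. A minimal sketch of driving it programmatically before this removal, run from the repository root with the example configuration shipped below (the same pattern appears in tests/test_experiment.py at the end of this commit):

    from chillax import experiment_selfsupervised

    # Equivalent to: python chillax/experiment_selfsupervised.py examples/configuration.json
    # main() takes a list of JSON config file paths and combines them with the user configuration.
    experiment_selfsupervised.main(config_files=["examples/configuration.json"])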

+ 0 - 81
chillax/information_content.py

@@ -1,81 +0,0 @@
-import math
-import abc
-import networkx as nx
-
-from chia import components
-
-
-class InformationContentCalculator(abc.ABC):
-    @abc.abstractmethod
-    def calculate_information_content(
-        self, concept_uid: str, rgraph: nx.DiGraph
-    ) -> float:
-        pass
-
-
-class Sanchez2011OriginalICC(InformationContentCalculator):
-    def calculate_information_content(self, concept_uid: str, rgraph: nx.DiGraph):
-        exclusive_leaves = set(
-            filter(
-                lambda n: rgraph.out_degree[n] == 0, nx.descendants(rgraph, concept_uid)
-            )
-        ) - {concept_uid}
-
-        all_leaves = set(filter(lambda n: rgraph.out_degree[n] == 0, rgraph.nodes))
-
-        ancestors = set(nx.ancestors(rgraph, concept_uid)) | {concept_uid}
-
-        index = -math.log(
-            ((len(exclusive_leaves) / float(len(ancestors))) + 1.0)
-            / (float(len(all_leaves)) + 1.0)
-        )
-
-        return math.fabs(index)
-
-
-class Sanchez2011ModifiedICC(InformationContentCalculator):
-    def calculate_information_content(self, concept_uid: str, rgraph: nx.DiGraph):
-
-        all_leaves = set(filter(lambda n: rgraph.out_degree[n] == 0, rgraph.nodes))
-
-        non_exclusive_leaves = (
-            set(nx.descendants(rgraph, concept_uid)) | {concept_uid}
-        ) & all_leaves
-
-        ancestors = set(nx.ancestors(rgraph, concept_uid)) | {concept_uid}
-
-        index = -math.log(
-            ((len(non_exclusive_leaves) / float(len(ancestors))) + 1.0)
-            / (float(len(all_leaves)) + 1.0)
-        )
-
-        return math.fabs(index)
-
-
-class Zhou2008ModifiedICC(InformationContentCalculator):
-    def calculate_information_content(self, concept_uid: str, rgraph: nx.DiGraph):
-        root = next(nx.topological_sort(rgraph))
-
-        all_leaves = set(filter(lambda n: rgraph.out_degree[n] == 0, rgraph.nodes))
-        all_leaf_depths = [
-            nx.shortest_path_length(rgraph, root, leaf) for leaf in all_leaves
-        ]
-        highest_depth = max(all_leaf_depths)
-        uid_depth = nx.shortest_path_length(rgraph, root, concept_uid)
-        descendants = set(nx.descendants(rgraph, concept_uid)) | {concept_uid}
-
-        k = 0.6  # Harispe et al. 2015, page 55 claims that this is the "original" value.
-        index1 = 1.0 - (math.log(len(descendants)) / math.log(len(rgraph.nodes)))
-        index2 = math.log(uid_depth + 1) / math.log(highest_depth + 1)
-
-        index = k * index1 + (1.0 - k) * index2
-
-        return math.fabs(index)
-
-
-class InformationContentCalculatorFactory(components.Factory):
-    name_to_class_mapping = {
-        "sanchez_2011_original": Sanchez2011OriginalICC,
-        "sanchez_2011_modified": Sanchez2011ModifiedICC,
-        "zhou_2008_modified": Zhou2008ModifiedICC,
-    }
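
The three calculators above implement the following indices (notation mine; the computation is exactly the removed code). Writing leaves(c) for the leaf descendants of concept c, subs(c) for its ancestors including c itself, hypo(c) for its descendants including c, L for the set of all leaves, C for the set of all concepts, and depth(c) for the shortest-path depth below the root:

    IC_{Sanchez}(c) = \left| -\log \frac{|leaves(c)| / |subs(c)| + 1}{|L| + 1} \right|

    IC_{Zhou}(c) = k \left( 1 - \frac{\log |hypo(c)|}{\log |C|} \right) + (1 - k) \, \frac{\log (depth(c) + 1)}{\log (depth_{max} + 1)}, \qquad k = 0.6

The "modified" Sanchez variant differs only in that a leaf concept counts itself among its own leaves; depth_{max} is the depth of the deepest leaf.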

+ 0 - 20
chillax/methods/__init__.py

@@ -1,20 +0,0 @@
-from chia.components import classifiers, interactors, sample_transformers
-
-from chillax.methods import chillax_classifier, noisy_oracle, sample_weight_by_ic
-
-from chillax.methods.chillax_extrapolator import CHILLAXExtrapolatorFactory
-
-
-def update_chia_factories():
-    classifiers.ClassifierFactory.name_to_class_mapping.update(
-        {"chillax": chillax_classifier.CHILLAXKerasHC}
-    )
-    interactors.InteractorFactory.name_to_class_mapping.update(
-        {"noisy_oracle": noisy_oracle.NoisyOracleInteractor}
-    )
-    sample_transformers.SampleTransformerFactory.name_to_class_mapping.update(
-        {"sample_weight_by_ic": sample_weight_by_ic.SampleWeightByICSampleTransfomer}
-    )
-
-
-__all__ = ["CHILLAXExtrapolatorFactory", "update_chia_factories"]

+ 0 - 451
chillax/methods/chillax_classifier.py

@@ -1,451 +0,0 @@
-from chia.components.classifiers import keras_hierarchicalclassification
-from chia import instrumentation, knowledge
-
-import networkx as nx
-import numpy as np
-import tensorflow as tf
-import pickle
-
-
-class CHILLAXKerasHC(
-    keras_hierarchicalclassification.EmbeddingBasedKerasHC, instrumentation.Observable
-):
-    def __init__(
-        self,
-        kb,
-        l2=5e-5,
-        force_prediction_targets=True,
-        raw_output=False,
-        weighting="default",
-        gain_compensation="simple",
-    ):
-        keras_hierarchicalclassification.EmbeddingBasedKerasHC.__init__(self, kb)
-        instrumentation.Observable.__init__(self)
-
-        # Configuration
-        self._l2_regularization_coefficient = l2
-
-        self._force_prediction_targets = force_prediction_targets
-
-        self._raw_output = raw_output
-        if self._raw_output and self._force_prediction_targets:
-            raise ValueError(
-                "Cannot use raw output and forced prediction targets at the same time!"
-            )
-
-        self._weighting = weighting
-        self._gain_compensation = gain_compensation
-
-        self.fc_layer = None
-        self.uid_to_dimension = {}
-        self.graph = None
-        self.prediction_target_uids = None
-        self.topo_sorted_uids = None
-        self.loss_weights = None
-        self.update_embedding()
-
-        self.extrapolator = None
-
-        self._reporting_step_counter = 0
-        self._last_reported_step = -1
-        self._running_sample_count = 0
-        self._running_changed_samples = 0
-
-    def predict_embedded(self, feature_batch):
-        return self.fc_layer(feature_batch)
-
-    def embed(self, labels):
-        embedding = np.zeros((len(labels), len(self.uid_to_dimension)))
-        for i, label in enumerate(labels):
-            if label == "chia::UNCERTAIN":
-                embedding[i] = 1.0
-            else:
-                embedding[i, self.uid_to_dimension[label]] = 1.0
-                for ancestor in nx.ancestors(self.graph, label):
-                    embedding[i, self.uid_to_dimension[ancestor]] = 1.0
-
-        return embedding
-
-    def deembed_dist(self, embedded_labels):
-        return [
-            self._deembed_single(embedded_label) for embedded_label in embedded_labels
-        ]
-
-    def _deembed_single(self, embedded_label):
-        conditional_probabilities = self._calculate_conditional_probabilities(
-            embedded_label
-        )
-
-        if self._raw_output:
-            # Directly output conditional probabilities
-            return list(conditional_probabilities.items())
-        else:
-            unconditional_probabilities = self._calculate_unconditional_probabilities(
-                conditional_probabilities
-            )
-
-            # Note: Stage 2 from IDK is missing here. This is on purpose.
-            tuples = unconditional_probabilities.items()
-            sorted_tuples = list(sorted(tuples, key=lambda tup: tup[1], reverse=True))
-
-            # If requested, only output scores for the forced prediction targets
-            if self._force_prediction_targets:
-                for i, (uid, p) in enumerate(sorted_tuples):
-                    if uid not in self.prediction_target_uids:
-                        sorted_tuples[i] = (uid, 0.0)
-
-                total_scores = sum([p for uid, p in sorted_tuples])
-                if total_scores > 0:
-                    sorted_tuples = [
-                        (uid, p / total_scores) for uid, p in sorted_tuples
-                    ]
-
-            return list(sorted_tuples)
-
-    def _calculate_conditional_probabilities(self, embedded_label):
-        conditional_probabilities = {
-            uid: embedded_label[i] for uid, i in self.uid_to_dimension.items()
-        }
-        return conditional_probabilities
-
-    def _calculate_unconditional_probabilities(self, conditional_probabilities):
-        # Calculate the unconditional probabilities
-        unconditional_probabilities = {}
-        for uid in self.topo_sorted_uids:
-            unconditional_probability = conditional_probabilities[uid]
-
-            no_parent_probability = 1.0
-            has_parents = False
-            for parent in self.graph.predecessors(uid):
-                has_parents = True
-                no_parent_probability *= 1.0 - unconditional_probabilities[parent]
-
-            if has_parents:
-                unconditional_probability *= 1.0 - no_parent_probability
-
-            unconditional_probabilities[uid] = unconditional_probability
-
-        return unconditional_probabilities
-
-    def update_embedding(self):
-        current_concepts = self.kb.concepts()
-        current_concept_count = len(current_concepts)
-        self.report_metric("current_concepts", current_concept_count)
-
-        if current_concept_count == 0:
-            return True
-
-        try:
-            old_weights = self.fc_layer.get_weights()
-            old_uidtodim = self.uid_to_dimension
-            old_graph = self.graph
-
-        except Exception:
-            old_weights = []
-            old_uidtodim = []
-            old_graph = None
-
-        self.fc_layer = tf.keras.layers.Dense(
-            current_concept_count,
-            activation="sigmoid",
-            kernel_regularizer=tf.keras.regularizers.l2(
-                self._l2_regularization_coefficient
-            )
-            if self._l2_regularization_coefficient > 0.0
-            else None,
-            kernel_initializer="zero",
-            bias_initializer="zero",
-        )
-
-        try:
-            self.graph = self.kb.get_hyponymy_relation_rgraph()
-        except ValueError:
-            return False
-
-        # Memorize topological sorting for later
-        all_uids = nx.topological_sort(self.graph)
-        self.topo_sorted_uids = list(all_uids)
-        assert len(current_concepts) == len(self.topo_sorted_uids)
-
-        self.uid_to_dimension = {
-            uid: dimension for dimension, uid in enumerate(self.topo_sorted_uids)
-        }
-
-        self.prediction_target_uids = {
-            concept.uid
-            for concept in self.kb.concepts(
-                flags={knowledge.ConceptFlag.PREDICTION_TARGET}
-            )
-        }
-
-        if len(old_weights) == 2:
-            # Layer can be updated
-            new_weights = np.zeros([old_weights[0].shape[0], current_concept_count])
-            new_biases = np.zeros([current_concept_count])
-
-            reused_concepts = 0
-            for new_uid, dim in self.uid_to_dimension.items():
-                # Check if old weight is even available
-                if new_uid not in old_uidtodim.keys():
-                    continue
-
-                # Check if parents have changed
-                if set(self.graph.predecessors(new_uid)) != set(
-                    old_graph.predecessors(new_uid)
-                ):
-                    continue
-
-                new_weights[:, dim] = old_weights[0][:, old_uidtodim[new_uid]]
-                new_biases[dim] = old_weights[1][old_uidtodim[new_uid]]
-                reused_concepts += 1
-
-            self.report_metric("reused_concepts", reused_concepts)
-
-            self.fc_layer.build([None, old_weights[0].shape[0]])
-            self.fc_layer.set_weights([new_weights, new_biases])
-
-        self.update_loss_weights()
-        return True
-
-    def update_loss_weights(self):
-        if len(self.prediction_target_uids) == 0:
-            self.log_debug("Skipping loss weight update, no concepts found.")
-            self.loss_weights = []
-            return
-
-        self.log_debug(
-            f"Updating loss weights. Strategy: {self._weighting}, "
-            f"gain compensation: {self._gain_compensation}"
-        )
-
-        # (1) Calculate "natural" weights by assuming uniform distribution
-        # over observed concepts
-        occurences = {uid: 0 for uid in self.topo_sorted_uids}
-        for uid in self.prediction_target_uids:
-            affected_uids = {uid}
-            affected_uids |= nx.ancestors(self.graph, uid)
-            for affected_uid in list(affected_uids):
-                affected_uids |= set(self.graph.successors(affected_uid))
-
-            for affected_uid in affected_uids:
-                occurences[affected_uid] += 1
-
-        occurrence_vector = np.array([occurences[uid] for uid in self.uid_to_dimension])
-
-        # (2) Calculate weight vector
-        if self._weighting == "default":
-            self.loss_weights = np.ones(len(self.uid_to_dimension))
-
-        elif self._weighting == "equalize":
-            try:
-                self.loss_weights = (
-                    np.ones(len(self.uid_to_dimension)) / occurrence_vector
-                )
-            except ZeroDivisionError as err:
-                self.log_fatal("Division by zero in equalize loss weighting strategy.")
-                raise err
-
-        elif self._weighting == "descendants":
-            try:
-                # Start with an equal weighting
-                self.loss_weights = (
-                    np.ones(len(self.uid_to_dimension)) / occurrence_vector
-                )
-
-                for i, uid in enumerate(self.uid_to_dimension):
-                    self.loss_weights[i] *= (
-                        len(nx.descendants(self.graph, uid)) + 1.0
-                    )  # Add one for the node itself.
-            except ZeroDivisionError as err:
-                self.log_fatal(
-                    "Division by zero in descendants loss weighting strategy."
-                )
-                raise err
-
-        elif self._weighting == "reachable_leaf_nodes":
-            try:
-                # Start with an equal weighting
-                self.loss_weights = (
-                    np.ones(len(self.uid_to_dimension)) / occurrence_vector
-                )
-
-                for i, uid in enumerate(self.uid_to_dimension):
-                    descendants = set(nx.descendants(self.graph, uid)) | {uid}
-                    reachable_leaf_nodes = descendants.intersection(
-                        self.prediction_target_uids
-                    )
-                    self.loss_weights[i] *= len(reachable_leaf_nodes)
-
-                    # Test if any leaf nodes are reachable
-                    if len(reachable_leaf_nodes) == 0:
-                        raise ValueError(
-                            f"In this hierarchy, the node {uid} cannot reach "
-                            "any leaf nodes!"
-                        )
-
-            except ZeroDivisionError as err:
-                self.log_fatal(
-                    "Division by zero in reachable_leaf_nodes loss weighting strategy."
-                )
-                raise err
-
-        else:
-            raise ValueError(f'Unknown loss weighting strategy "{self._weighting}"')
-
-        # Normalize so we don't have to adapt the learning rate a lot.
-        if self._gain_compensation == "simple":
-            gain = np.mean(self.loss_weights)
-        elif self._gain_compensation == "per_element":
-            gain = np.mean(self.loss_weights * occurrence_vector) / np.mean(
-                occurrence_vector
-            )
-        else:
-            raise ValueError(
-                f'Unknown gain compensation setting "{self._gain_compensation}"'
-            )
-
-        self.report_metric("gain_from_weighting", gain)
-        self.loss_weights /= gain
-
-    def loss(self, feature_batch, ground_truth, weight_batch, global_step):
-        if not self.is_updated:
-            raise RuntimeError(
-                "This classifier is not yet ready to compute a loss. "
-                "Check if it has been notified of a hyponymy relation."
-            )
-
-        self._reporting_step_counter = global_step
-
-        # (1) Predict
-        prediction = self.predict_embedded(feature_batch)
-
-        # (2) Extrapolate ground truth
-        extrapolated_ground_truth = self._extrapolate(ground_truth, prediction)
-
-        # (3) Compute loss mask
-        loss_mask = np.zeros(
-            (len(extrapolated_ground_truth), len(self.uid_to_dimension))
-        )
-        for i, label in enumerate(extrapolated_ground_truth):
-            # Loss mask
-            loss_mask[i, self.uid_to_dimension[label]] = 1.0
-
-            for ancestor in nx.ancestors(self.graph, label):
-                loss_mask[i, self.uid_to_dimension[ancestor]] = 1.0
-                for successor in self.graph.successors(ancestor):
-                    loss_mask[i, self.uid_to_dimension[successor]] = 1.0
-                    # The successors of the ancestors already cover the node itself, which is also set explicitly above.
-
-            if not self._force_prediction_targets:
-                # Learn direct successors in order to "stop"
-                # prediction at these nodes.
-                # If MLNP is active, then this can be ignored.
-                # Because we never want to predict
-                # inner nodes, we interpret labels at
-                # inner nodes as imprecise labels.
-                for successor in self.graph.successors(label):
-                    loss_mask[i, self.uid_to_dimension[successor]] = 1.0
-
-        # (4) Embed ground truth
-        embedded_ground_truth = self.embed(extrapolated_ground_truth)
-
-        # (5) Compute binary cross entropy loss function
-        clipped_probs = tf.clip_by_value(prediction, 1e-7, (1.0 - 1e-7))
-        the_loss = -(
-            embedded_ground_truth * tf.math.log(clipped_probs)
-            + (1.0 - embedded_ground_truth) * tf.math.log(1.0 - clipped_probs)
-        )
-
-        sum_per_batch_element = tf.reduce_sum(
-            the_loss * loss_mask * self.loss_weights, axis=1
-        )
-
-        return tf.reduce_mean(sum_per_batch_element * weight_batch)
-
-    def observe(self, samples, gt_resource_id):
-        self.maybe_update_embedding()
-
-    def regularization_losses(self):
-        return self.fc_layer.losses
-
-    def trainable_variables(self):
-        return self.fc_layer.trainable_variables
-
-    def save(self, path):
-        with open(path + "_hc.pkl", "wb") as target:
-            pickle.dump(self.fc_layer.get_weights(), target)
-
-        with open(path + "_uidtodim.pkl", "wb") as target:
-            pickle.dump((self.uid_to_dimension,), target)
-
-    def restore(self, path):
-        self.maybe_update_embedding()
-        with open(path + "_hc.pkl", "rb") as target:
-            new_weights = pickle.load(target)
-            has_weights = False
-            try:
-                has_weights = len(self.fc_layer.get_weights()) == 2
-            except Exception:
-                pass
-
-            if not has_weights:
-                self.fc_layer.build([None, new_weights[0].shape[0]])
-
-            self.fc_layer.set_weights(new_weights)
-
-        with open(path + "_uidtodim.pkl", "rb") as target:
-            (self.uid_to_dimension,) = pickle.load(target)
-
-        self.update_embedding()
-
-    def _extrapolate(self, ground_truth, embedded_prediction):
-        # Only do anything if there is an extrapolator
-        if self.extrapolator is not None:
-            epn = embedded_prediction.numpy()
-            extrapolator_inputs = []
-            for i, ground_truth_element in enumerate(ground_truth):
-                # Get the raw scores
-                conditional_probabilities = self._calculate_conditional_probabilities(
-                    epn[i]
-                )
-
-                # If the extrapolator wants it, apply the ground truth to the prediction at the
-                # conditional probability level.
-                if self.extrapolator.apply_ground_truth:
-                    label_true = {ground_truth_element}
-                    known = {ground_truth_element}
-                    for ancestor in nx.ancestors(self.graph, ground_truth_element):
-                        label_true |= {ancestor}
-                        known |= {ancestor}
-                        for child in self.graph.successors(ancestor):
-                            known |= {child}
-
-                    for uid in known:
-                        conditional_probabilities[uid] = (
-                            1.0 if uid in label_true else 0.0
-                        )
-
-                # Calculate unconditionals and extrapolate
-                unconditional_probabilities = (
-                    self._calculate_unconditional_probabilities(
-                        conditional_probabilities
-                    )
-                )
-                extrapolator_inputs += [
-                    (ground_truth_element, unconditional_probabilities)
-                ]
-
-            extrapolated_ground_truth = self.extrapolator.extrapolate(
-                extrapolator_inputs
-            )
-
-            # Handle reporting
-            if self._reporting_step_counter % 10 == 9:
-                if self._last_reported_step < self._reporting_step_counter:
-                    self.extrapolator.reporting_report(self._reporting_step_counter)
-
-                    self._last_reported_step = self._reporting_step_counter
-
-            return extrapolated_ground_truth
-        else:
-            return ground_truth
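
Two parts of the removed classifier read more easily as formulas (notation mine; semantics as in the code above). _calculate_unconditional_probabilities converts the per-concept sigmoid outputs p(c), interpreted as conditional probabilities, into unconditional probabilities P(c) in one pass over the topologically sorted concepts:

    P(c) = p(c) \cdot \left( 1 - \prod_{q \in parents(c)} \bigl( 1 - P(q) \bigr) \right), \qquad P(c) = p(c) \text{ for concepts without parents}

and loss() is a masked, weighted binary cross-entropy over the embedded (and possibly extrapolated) ground truth:

    \mathcal{L} = \frac{1}{B} \sum_{i=1}^{B} w_i \sum_{c} m_{i,c} \, \lambda_c \, \bigl[ -y_{i,c} \log \hat{y}_{i,c} - (1 - y_{i,c}) \log (1 - \hat{y}_{i,c}) \bigr]

where \hat{y} is clipped to [10^{-7}, 1 - 10^{-7}], m_{i,c} is the loss mask covering the label, its ancestors and their children, \lambda_c are the per-concept weights from update_loss_weights(), and w_i is the per-sample weight passed in weight_batch.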

+ 0 - 441
chillax/methods/chillax_extrapolator.py

@@ -1,441 +0,0 @@
-from chia import components, knowledge, instrumentation
-
-from chillax import information_content
-
-import abc
-import collections
-import typing
-
-import networkx as nx
-import numpy as np
-
-
-class CHILLAXExtrapolator(
-    instrumentation.Observer, instrumentation.Observable, abc.ABC
-):
-    def __init__(
-        self,
-        knowledge_base: knowledge.KnowledgeBase,
-        apply_ground_truth: bool,
-        ic_method: typing.Optional[str] = None,
-    ):
-        instrumentation.Observable.__init__(self)
-        self.knowledge_base = knowledge_base
-        self.knowledge_base.register(self)
-
-        self.apply_ground_truth = apply_ground_truth
-
-        self.is_updated = False
-
-        # Graph Cache
-        self._rgraph = nx.DiGraph()
-        self._uid_to_depth = dict()
-        self._prediction_targets = set()
-
-        # Information Content Cache
-        self._ic_calc: information_content.InformationContentCalculator = (
-            information_content.InformationContentCalculatorFactory.create(
-                {"name": ic_method if ic_method is not None else "zhou_2008_modified"}
-            )
-        )
-        self._ic_cache = dict()
-
-        self.update_relations_and_concepts()
-
-        # Reporting
-        self._reporting_samples_total = 0
-        self._reporting_samples_changed = 0
-        self._reporting_cum_ic_gain = 0
-
-    def extrapolate(self, extrapolator_inputs):
-        if not self.is_updated:
-            raise RuntimeError(
-                "This extrapolator is not updated. "
-                "Please check if it is subscribed to "
-                "RelationChange and ConceptChange messages."
-            )
-
-        outputs = []
-        for ground_truth_uid, unconditional_probabilities in extrapolator_inputs:
-            outputs += [
-                self._extrapolate(ground_truth_uid, unconditional_probabilities)
-            ]
-
-        self._reporting_update(
-            zip([gt_uid for gt_uid, _ in extrapolator_inputs], outputs)
-        )
-        return outputs
-
-    def _reporting_update(self, label_pairs):
-        for gt_uid, ext_uid in label_pairs:
-            if gt_uid != ext_uid:
-                self._reporting_samples_changed += 1
-                self._reporting_cum_ic_gain += (
-                    self._ic_cache[ext_uid] - self._ic_cache[gt_uid]
-                )
-
-            self._reporting_samples_total += 1
-
-    def _reporting_reset(self):
-        self._reporting_samples_total = 0
-        self._reporting_samples_changed = 0
-        self._reporting_cum_ic_gain = 0
-
-    def reporting_report(self, current_step):
-        if self._reporting_samples_total == 0:
-            return
-
-        self.report_metric(
-            "extrapolation_changed_sample_fraction",
-            self._reporting_samples_changed / float(self._reporting_samples_total),
-            step=current_step,
-        )
-        self.report_metric(
-            "extrapolation_avg_ic_gain",
-            self._reporting_cum_ic_gain / float(self._reporting_samples_total),
-            step=current_step,
-        )
-
-        self._reporting_reset()
-
-    def update(self, message: instrumentation.Message):
-        if isinstance(message, knowledge.RelationChangeMessage) or isinstance(
-            message, knowledge.ConceptChangeMessage
-        ):
-            self.is_updated = False
-            self.update_relations_and_concepts()
-
-    def update_relations_and_concepts(self):
-        try:
-            # Update Information Content Cache
-            self._ic_cache = dict()
-            rgraph = self.knowledge_base.get_hyponymy_relation_rgraph()
-            for concept in self.knowledge_base.concepts():
-                self._ic_cache[
-                    concept.uid
-                ] = self._ic_calc.calculate_information_content(concept.uid, rgraph)
-
-            # Graph Update
-            self._rgraph = self.knowledge_base.get_hyponymy_relation_rgraph()
-            self._prediction_targets = {
-                concept.uid
-                for concept in self.knowledge_base.concepts(
-                    flags={knowledge.ConceptFlag.PREDICTION_TARGET}
-                )
-            }
-
-            root = list(nx.topological_sort(self._rgraph))[0]
-            self._uid_to_depth = {
-                concept.uid: len(nx.shortest_path(self._rgraph, root, concept.uid))
-                for concept in self.knowledge_base.concepts()
-            }
-
-        except ValueError as verr:
-            self.log_warning(f"Could not update extrapolator. {verr.args}")
-
-        self._update_relations_and_concepts()
-
-    @abc.abstractmethod
-    def _extrapolate(self, ground_truth_uid, unconditional_probabilities):
-        pass
-
-    def _update_relations_and_concepts(self):
-        self.is_updated = True
-
-
-class DoNothingCHILLAXExtrapolator(CHILLAXExtrapolator):
-    def _extrapolate(self, ground_truth_uid, unconditional_probabilities):
-        return ground_truth_uid
-
-
-class SimpleThresholdCHILLAXExtrapolator(CHILLAXExtrapolator):
-    def __init__(
-        self,
-        knowledge_base,
-        apply_ground_truth,
-        ic_method: typing.Optional[str] = None,
-        threshold=0.55,
-    ):
-        super().__init__(
-            knowledge_base=knowledge_base,
-            apply_ground_truth=apply_ground_truth,
-            ic_method=ic_method,
-        )
-
-        self._threshold = threshold
-
-    def _extrapolate(self, ground_truth_uid, unconditional_probabilities):
-        candidates = [
-            uid
-            for (uid, probability) in unconditional_probabilities.items()
-            if probability >= self._threshold
-        ]
-
-        if len(candidates) > 0:
-            candidates_with_ic = [(uid, self._ic_cache[uid]) for uid in candidates]
-
-            # Sort by probability first, see other methods for explanation of noise
-            candidates_with_ic = list(
-                sorted(
-                    candidates_with_ic,
-                    key=lambda x: unconditional_probabilities[x[0]]
-                    + np.random.normal(0, 0.0001),
-                    reverse=True,
-                )
-            )
-
-            # Sort by IC second. Stable sorting is guaranteed by python.
-            candidates_with_ic = list(
-                sorted(candidates_with_ic, key=lambda x: x[1], reverse=True)
-            )
-            return candidates_with_ic[0][0]
-        else:
-            return ground_truth_uid
-
-
-class DepthStepsCHILLAXExtrapolator(CHILLAXExtrapolator):
-    def __init__(
-        self,
-        knowledge_base,
-        apply_ground_truth,
-        ic_method: typing.Optional[str] = None,
-        steps=1,
-        threshold=None,
-    ):
-        super().__init__(
-            knowledge_base=knowledge_base,
-            apply_ground_truth=apply_ground_truth,
-            ic_method=ic_method,
-        )
-
-        self._steps = steps
-        self._threshold = threshold
-
-    def _extrapolate(self, ground_truth_uid, unconditional_probabilities):
-        original_depth = self._uid_to_depth[ground_truth_uid]
-        allowed_depth = original_depth + self._steps
-
-        allowed_uids = set()
-        for descendant in nx.descendants(self._rgraph, ground_truth_uid):
-            if self._uid_to_depth[descendant] == allowed_depth:
-                allowed_uids |= {descendant}
-            elif (
-                self._uid_to_depth[descendant] < allowed_depth
-                and descendant in self._prediction_targets
-            ):
-                # We need to allow leaf nodes if they are shallower than the allowed depth.
-                # Otherwise, there would sometimes be no candidates at all.
-                allowed_uids |= {descendant}
-
-        candidates = [
-            uid
-            for (uid, probability) in unconditional_probabilities.items()
-            if uid in allowed_uids
-        ]
-
-        if len(candidates) > 0:
-            # When sorting by probability, add a very small amount of noise because of the nodes
-            # that return exactly 0.5. Otherwise, the sorting is done alphabetically or topologically,
-            # creating a bias.
-            candidates = list(
-                sorted(
-                    candidates,
-                    key=lambda x: unconditional_probabilities[x]
-                    + np.random.normal(0, 0.0001),
-                    reverse=True,
-                )
-            )
-            if self._threshold is not None:
-                candidates = [
-                    candidate
-                    for candidate in candidates
-                    if unconditional_probabilities[candidate] > self._threshold
-                ]
-            if len(candidates) > 0:
-                return candidates[0]
-            else:
-                return ground_truth_uid
-        else:
-            return ground_truth_uid
-
-
-class ForcePredictionTargetCHILLAXExtrapolator(CHILLAXExtrapolator):
-    def __init__(
-        self, knowledge_base, apply_ground_truth, ic_method: typing.Optional[str] = None
-    ):
-        super().__init__(
-            knowledge_base=knowledge_base,
-            apply_ground_truth=apply_ground_truth,
-            ic_method=ic_method,
-        )
-
-    def _extrapolate(self, ground_truth_uid, unconditional_probabilities):
-        candidates = [
-            uid
-            for (uid, probability) in unconditional_probabilities.items()
-            if uid in self._prediction_targets
-        ]
-
-        if len(candidates) > 0:
-            # When sorting by probability, add a very small amount of noise because of the nodes
-            # that return exactly 0.5. Otherwise, the sorting is done alphabetically or topologically,
-            # creating a bias.
-            candidates = list(
-                sorted(
-                    candidates,
-                    key=lambda x: unconditional_probabilities[x]
-                    + np.random.normal(0, 0.0001),
-                    reverse=True,
-                )
-            )
-            return candidates[0]
-        else:
-            return ground_truth_uid
-
-
-class ICGainRangeCHILLAXExtrapolator(CHILLAXExtrapolator):
-    def __init__(
-        self,
-        knowledge_base,
-        apply_ground_truth,
-        ic_method: typing.Optional[str] = None,
-        ic_gain_target=None,
-        ic_range=0.2,
-        probability_threshold=0.55,
-    ):
-        super().__init__(
-            knowledge_base=knowledge_base,
-            apply_ground_truth=apply_ground_truth,
-            ic_method=ic_method,
-        )
-
-        self._ic_gain_target = ic_gain_target
-        self._ic_range = ic_range
-        self._probability_threshold = probability_threshold
-
-    def _extrapolate(self, ground_truth_uid, unconditional_probabilities):
-        # We need this more often
-        ground_truth_ic = self._ic_cache[ground_truth_uid]
-        target_ic = ground_truth_ic + self._ic_gain_target
-        half_range = self._ic_range / 2.0
-
-        candidates = [
-            uid
-            for (uid, probability) in unconditional_probabilities.items()
-            if -half_range <= (self._ic_cache[uid] - target_ic) <= half_range
-            and probability >= self._probability_threshold
-        ]
-
-        if len(candidates) > 0:
-            candidates_with_ic = [(uid, self._ic_cache[uid]) for uid in candidates]
-
-            # Sort by probability first, see other methods for explanation of noise
-            candidates_with_ic = list(
-                sorted(
-                    candidates_with_ic,
-                    key=lambda x: unconditional_probabilities[x[0]]
-                    + np.random.normal(0, 0.0001),
-                    reverse=True,
-                )
-            )
-
-            # Sort by IC. Stable sorting is guaranteed by python.
-            candidates_with_ic = list(
-                sorted(candidates_with_ic, key=lambda x: x[1], reverse=True)
-            )
-            return candidates_with_ic[0][0]
-        else:
-            return ground_truth_uid
-
-
-class AdaptiveICGainCHILLAXExtrapolator(CHILLAXExtrapolator):
-    def __init__(
-        self,
-        knowledge_base,
-        apply_ground_truth,
-        ic_method: typing.Optional[str] = None,
-        ic_gain_target=None,
-        min_threshold=0.55,
-        max_threshold=1.0,
-        learning_rate=1.0,
-    ):
-        super().__init__(
-            knowledge_base=knowledge_base,
-            apply_ground_truth=apply_ground_truth,
-            ic_method=ic_method,
-        )
-
-        self._ic_gain_target = ic_gain_target
-        self._min_threshold = min_threshold
-        self._max_threshold = max_threshold
-        self._learning_rate = learning_rate
-
-        # Initialize the threshold
-        self._threshold = min_threshold
-
-        self._last_ic_gains = collections.deque(maxlen=64)
-
-    def _extrapolate(self, ground_truth_uid, unconditional_probabilities):
-        """Same candidate selection as SimpleThresholdCHILLAXExtrapolator, but the threshold adapts towards a target IC gain."""
-        candidates = [
-            uid
-            for (uid, probability) in unconditional_probabilities.items()
-            if probability >= self._threshold
-        ]
-
-        if len(candidates) > 0:
-            candidates_with_ic = [(uid, self._ic_cache[uid]) for uid in candidates]
-
-            # Sort by probability first, see other methods for explanation of noise
-            candidates_with_ic = list(
-                sorted(
-                    candidates_with_ic,
-                    key=lambda x: unconditional_probabilities[x[0]]
-                    + np.random.normal(0, 0.0001),
-                    reverse=True,
-                )
-            )
-
-            # Sort by IC second. Stable sorting is guaranteed by python.
-            candidates_with_ic = list(
-                sorted(candidates_with_ic, key=lambda x: x[1], reverse=True)
-            )
-            return_value = candidates_with_ic[0][0]
-        else:
-            return_value = ground_truth_uid
-
-        # Compute the actual IC gain of our actions
-        realized_ic_gain = (
-            self._ic_cache[return_value] - self._ic_cache[ground_truth_uid]
-        )
-
-        # Compute average IC gain, maxlen should do the rest :)
-        self._last_ic_gains.append(realized_ic_gain)
-        avg_ic_gain = sum(self._last_ic_gains) / float(len(self._last_ic_gains))
-
-        # Assume that increasing the threshold decreases the possible IC gain
-        # e.g. if the average IC gain is 0.3 above the target, increase the threshold by 0.3 (lr=1.0)
-        step = self._learning_rate * (avg_ic_gain - self._ic_gain_target)
-        self._threshold = max(
-            self._min_threshold, min(self._max_threshold, self._threshold + step)
-        )
-
-        return return_value
-
-    def reporting_report(self, current_step):
-        """We want to have a look at the thresholds."""
-        self.report_metric(
-            "extrapolation_current_threshold", self._threshold, current_step
-        )
-        super().reporting_report(current_step)
-
-
-class CHILLAXExtrapolatorFactory(components.Factory):
-    name_to_class_mapping = {
-        "do_nothing": DoNothingCHILLAXExtrapolator,
-        "simple_threshold": SimpleThresholdCHILLAXExtrapolator,
-        "depth_steps": DepthStepsCHILLAXExtrapolator,
-        "force_prediction_target": ForcePredictionTargetCHILLAXExtrapolator,
-        "adaptive_ic_gain": AdaptiveICGainCHILLAXExtrapolator,
-        "ic_gain_range": ICGainRangeCHILLAXExtrapolator,
-    }
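
The adaptive variant above selects candidates exactly like SimpleThresholdCHILLAXExtrapolator, but after every sample it moves its probability threshold t towards a configured target IC gain g* (notation mine):

    t \leftarrow \mathrm{clip}\bigl( t + \eta \, (\bar{g} - g^{*}), \; t_{min}, \; t_{max} \bigr)

where \bar{g} is the mean realized IC gain over the last 64 extrapolated samples and \eta is the learning_rate parameter.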

+ 0 - 179
chillax/methods/noisy_oracle.py

@@ -1,179 +0,0 @@
-from typing import Optional
-
-import networkx as nx
-import numpy as np
-
-from chia import instrumentation, knowledge
-from chia.components.interactors import interactor
-
-
-class NoisyOracleInteractor(
-    interactor.Interactor, instrumentation.Observable, instrumentation.Observer
-):
-    def __init__(
-        self,
-        kb,
-        noise_model,
-        inaccuracy=0.0,
-        relabel_fraction=None,
-        lambda_=None,
-        q=None,
-        filter_imprecise=False,
-        project_to_random_leaf=False,
-    ):
-        interactor.Interactor.__init__(self, kb=kb)
-        instrumentation.Observable.__init__(self)
-        instrumentation.Observer.__init__(self)
-
-        self.noise_model = noise_model
-        self.inaccuracy = inaccuracy
-
-        if self.noise_model == "Deng2014":
-            assert relabel_fraction is not None
-            self.relabel_fraction: float = relabel_fraction
-        elif self.noise_model == "Poisson":
-            assert lambda_ is not None
-            self.lambda_: float = lambda_
-        elif self.noise_model == "Geometric":
-            assert q is not None
-            self.q: float = q
-        elif self.noise_model == "Inaccuracy":
-            pass
-        else:
-            raise ValueError(f"Unknown noise model: {self.noise_model}")
-
-        self.filter_imprecise = filter_imprecise
-        self.project_to_random_leaf = project_to_random_leaf
-
-        self.is_updated = False
-        self.graph: Optional[nx.DiGraph] = None
-        self.root = None
-        self.leaf_nodes = None
-
-        self._kb.register(self)
-
-    def _apply_deng_noise(self, uid):
-        if np.random.binomial(1, self.relabel_fraction):
-            chosen_predecessor = next(
-                self.graph.predecessors(uid)
-            )  # TODO what to do if there is more than 1 parent?
-            return chosen_predecessor
-        else:
-            return uid
-
-    def _apply_geometric_noise(self, uid):
-        target = np.random.geometric(1 - self.q) - 1
-        return self._reduce_depth_to(uid, target)
-
-    def _apply_poisson_noise(self, uid):
-        target = np.random.poisson(self.lambda_)
-        return self._reduce_depth_to(uid, target)
-
-    def _reduce_depth_to(self, uid, depth_target):
-        path_to_label = nx.shortest_path(self.graph, self.root, uid)
-        final_depth = max(0, min(len(path_to_label) - 1, depth_target))
-        return path_to_label[final_depth]
-
-    def _project_to_random_leaf(self, uid):
-        if self.graph.out_degree(uid) == 0:  # noqa
-            return uid
-        else:
-            # List all descendants
-            all_descendants = nx.descendants(self.graph, uid)
-
-            # Use only leaves
-            valid_descendants = list(
-                filter(lambda n: self.graph.out_degree(n) == 0, all_descendants)  # noqa
-            )
-
-            return np.random.choice(valid_descendants)
-
-    def _maybe_update_graphs(self):
-        if not self.is_updated:
-            try:
-                self.graph = self._kb.get_hyponymy_relation_rgraph()
-                self.root = next(nx.topological_sort(self.graph))
-                self.leaf_nodes = list(
-                    filter(
-                        lambda n: self.graph.out_degree(n) == 0, self.graph.nodes
-                    )  # noqa
-                )
-                self.is_updated = True
-            except ValueError:
-                # No graph available yet
-                pass
-
-    def query_annotations_for(self, samples, gt_resource_id, ann_resource_id):
-        self._maybe_update_graphs()
-
-        # Add noise
-        noisy_samples = [
-            sample.add_resource(
-                self.__class__.__name__,
-                ann_resource_id,
-                self.apply_noise(sample.get_resource(gt_resource_id)),
-            )
-            for sample in samples
-        ]
-
-        # Count modified samples
-        modified_samples = sum(
-            [
-                1
-                if noisy_sample.get_resource(gt_resource_id)
-                != noisy_sample.get_resource(ann_resource_id)
-                else 0
-                for noisy_sample in noisy_samples
-            ]
-        )
-        self.log_debug(f"Modified {modified_samples} out of {len(samples)} samples.")
-
-        # Filter imprecise samples
-        precise_only_samples = [
-            sample
-            for sample in noisy_samples
-            if self.apply_filter(sample.get_resource(ann_resource_id))
-        ]
-        self.log_debug(
-            f"Filtered out {len(samples)-len(precise_only_samples)}"
-            + f" out of {len(samples)} samples."
-        )
-        return precise_only_samples
-
-    def apply_noise(self, uid):
-        # Apply inaccuracy
-        if np.random.uniform() <= self.inaccuracy:
-            assert uid in self.leaf_nodes
-            inaccurate_uid = np.random.choice(self.leaf_nodes)
-        else:
-            inaccurate_uid = uid
-
-        # Select noise model
-        if self.noise_model == "Deng2014":
-            noisy_uid = self._apply_deng_noise(inaccurate_uid)
-        elif self.noise_model == "Geometric":
-            noisy_uid = self._apply_geometric_noise(inaccurate_uid)
-        elif self.noise_model == "Poisson":
-            noisy_uid = self._apply_poisson_noise(inaccurate_uid)
-        elif self.noise_model == "Inaccuracy":
-            noisy_uid = inaccurate_uid
-        else:
-            raise ValueError(f"Unknown noise model {self.noise_model}")
-
-        # Project to random leaf
-        if self.project_to_random_leaf:
-            noisy_uid = self._project_to_random_leaf(noisy_uid)
-
-        return noisy_uid
-
-    def apply_filter(self, uid):
-        if self.filter_imprecise:
-            return self.graph.out_degree(uid) == 0  # noqa
-        else:
-            return True
-
-    def update(self, message: instrumentation.Message):
-        if isinstance(message, knowledge.RelationChangeMessage) or isinstance(
-            message, knowledge.ConceptChangeMessage
-        ):
-            self.is_updated = False
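
The two depth-based noise models above truncate the root-to-label path to a sampled target depth d, clipped to [0, depth(label)]; only the sampling distribution differs (notation mine):

    d \sim \mathrm{Geometric}(1 - q) - 1 \qquad \text{or} \qquad d \sim \mathrm{Poisson}(\lambda)

Deng2014 instead moves the label to a parent with probability relabel_fraction, and the Inaccuracy model applies only the step shared by all models: with probability inaccuracy, the true leaf label is first replaced by a uniformly drawn leaf.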

+ 0 - 136
chillax/methods/sample_weight_by_ic.py

@@ -1,136 +0,0 @@
-from chia.components.sample_transformers.sample_transformer import SampleTransformer
-from chia import knowledge
-from chia import instrumentation
-from chia import data
-
-from chillax import information_content
-
-import typing
-
-import numpy as np
-
-
-class SampleWeightByICSampleTransfomer(SampleTransformer, instrumentation.Observer):
-    """
-    SampleWeightByICSampleTransformer: Adds a training_weight to samples based on IC.
-
-    The update pattern for the KB is taken from chillax_extrapolator.
-    """
-
-    def __init__(
-        self,
-        kb: knowledge.KnowledgeBase,
-        ic_method: typing.Optional[str] = None,
-        coef_a=0.0,
-    ):
-        SampleTransformer.__init__(self, kb=kb)
-        instrumentation.Observer.__init__(self)
-
-        self.coef_a = coef_a
-
-        # Information Content
-        self.kb.register(self)
-        self.is_updated = False
-
-        self._ic_calc: information_content.InformationContentCalculator = (
-            information_content.InformationContentCalculatorFactory.create(
-                {"name": ic_method if ic_method is not None else "zhou_2008_modified"}
-            )
-        )
-        self._ic_cache = dict()
-        self.update_relations_and_concepts()
-
-    def transform(
-        self,
-        samples: typing.List[data.Sample],
-        is_training: bool,
-        label_resource_id: str,
-    ):
-        if not self.is_updated:
-            raise RuntimeError(
-                "This sample_weight_by_ic is not updated. "
-                "Please check if it is subscribed to "
-                "RelationChange and ConceptChange messages."
-            )
-
-        # A lot of this stuff assumes len(samples) > 0, so:
-        if len(samples) == 0:
-            return samples
-
-        # Calculate the IC of each sample
-        sample_ics = {
-            sample.get_resource("uid"): self._ic_cache[
-                sample.get_resource(label_resource_id)
-            ]
-            for sample in samples
-        }
-        ic_array = np.asarray(list(sample_ics.values()))
-
-        self.log_info(f'Information for {"training" if is_training else "test"} data:')
-        self.log_info(
-            f"IC mean        {np.mean(ic_array)}, std {np.std(ic_array)}, median {np.median(ic_array)}"
-        )
-        if not is_training:
-            self.log_debug(f"Not touching test data.")
-            return samples
-
-        sample_ics_exp = {
-            sample_uid: np.exp(sample_ic)
-            for sample_uid, sample_ic in sample_ics.items()
-        }
-        sample_ics_exp_tf = {
-            sample_uid: np.exp(self.coef_a * sample_ic)
-            for sample_uid, sample_ic in sample_ics.items()
-        }
-
-        ic_exp_array = np.asarray(list(sample_ics_exp.values()))
-        ic_exp_tf_array = np.asarray(list(sample_ics_exp_tf.values()))
-
-        self.log_info(
-            f"IC exp.    mean {np.mean(ic_exp_array)}, std {np.std(ic_exp_array)}, median {np.median(ic_exp_array)}"
-        )
-        self.log_info(
-            f"IC exp. tf mean {np.mean(ic_exp_tf_array)}, std {np.std(ic_exp_tf_array)}, median {np.median(ic_exp_tf_array)}"
-        )
-
-        ic_exp_tf_sum = np.sum(ic_exp_tf_array)
-
-        # Apply softmax
-        sample_weights = {
-            sample_uid: (sample_ic_exp_tf * len(samples)) / ic_exp_tf_sum
-            for sample_uid, sample_ic_exp_tf in sample_ics_exp_tf.items()
-        }
-        sample_weight_array = np.asarray(list(sample_weights.values()))
-        self.log_info(
-            f"Weight*cnt mean {np.mean(sample_weight_array)}, std {np.std(sample_weight_array)}, median {np.median(sample_weight_array)}"
-        )
-
-        return [
-            sample.add_resource(
-                self.__class__.__name__,
-                "training_weight",
-                sample_weights[sample.get_resource("uid")],
-            )
-            for sample in samples
-        ]
-
-    def update_relations_and_concepts(self):
-        try:
-            # Update Information Content Cache
-            self._ic_cache = dict()
-            rgraph = self.kb.get_hyponymy_relation_rgraph()
-            for concept in self.kb.concepts():
-                self._ic_cache[
-                    concept.uid
-                ] = self._ic_calc.calculate_information_content(concept.uid, rgraph)
-
-            self.is_updated = True
-        except ValueError as verr:
-            self.log_warning(f"Could not update sample_weight_by_ic. {verr.args}")
-
-    def update(self, message: instrumentation.Message):
-        if isinstance(message, knowledge.RelationChangeMessage) or isinstance(
-            message, knowledge.ConceptChangeMessage
-        ):
-            self.is_updated = False
-            self.update_relations_and_concepts()
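
The weight assigned by the removed transformer is a temperature-scaled softmax over the information content of each sample's label, rescaled so that the weights average to one across the N training samples passed to transform() (notation mine):

    w_i = N \cdot \frac{\exp\bigl(a \cdot IC(y_i)\bigr)}{\sum_{j=1}^{N} \exp\bigl(a \cdot IC(y_j)\bigr)}

where a is the coef_a parameter; with the default a = 0, every weight is exactly 1.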

+ 0 - 1
chillax/version.py

@@ -1 +0,0 @@
-__version__ = "0.1a16"

+ 0 - 68
examples/configuration.json

@@ -1,68 +0,0 @@
-{
-  "meta": {
-    "name": "example-experiment"
-  },
-  "evaluators": [
-    {
-      "name": "accuracy"
-    }
-  ],
-  "with_wordnet": true,
-  "interactor": {
-    "name": "noisy_oracle",
-    "noise_model": "Inaccuracy"
-  },
-  "observers": [
-    {
-      "name": "stream"
-    }
-  ],
-  "sample_transformers": [
-    {
-      "name": "sample_weight_by_ic"
-    }
-  ],
-  "runner": {
-    "name": "epoch",
-    "epochs": 2,
-    "max_test_samples": 4
-  },
-  "dataset": {
-    "name": "icifar"
-  },
-  "extrapolator": {
-    "name": "do_nothing",
-    "apply_ground_truth": true
-  },
-  "model": {
-    "classifier": {
-      "name": "chillax",
-      "l2": 5e-5,
-      "force_prediction_targets": true,
-      "raw_output": false
-    },
-    "base_model": {
-      "name": "keras",
-      "trainer": {
-        "name": "fast_single_shot",
-        "batch_size": 2,
-        "inner_steps": 11
-      },
-      "feature_extractor": {
-        "side_length": 32,
-        "trainable": true,
-        "architecture": "ResNet50V2",
-        "l2": 5e-5,
-        "use_pretrained_weights": null
-      },
-      "optimizer": {
-        "name": "sgd",
-        "momentum": 0.9
-      },
-      "learning_rate_schedule": {
-        "name": "constant",
-        "initial_lr": 0.01
-      }
-    }
-  }
-}

+ 0 - 33
setup.py

@@ -1,33 +0,0 @@
-from distutils import util
-
-from setuptools import find_packages, setup
-
-main_ns = {}
-ver_path = util.convert_path("chillax/version.py")
-
-with open(ver_path) as ver_file:
-    exec(ver_file.read(), main_ns)
-
-with open("README.md", "r") as fh:
-    long_description = fh.read()
-
-setup(
-    name="chillax",
-    version=main_ns["__version__"],
-    packages=find_packages(),
-    python_requires=">=3.7",
-    install_requires=[
-        "chia~=2.1.0",
-    ],
-    # metadata to display on PyPI
-    author="Clemens-Alexander Brust",
-    author_email="clemens-alexander.brust@uni-jena.de",
-    description="Concept Hierarchies for Imprecise Label Learning and Annotation eXtrapolation",
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    classifiers=[
-        "Programming Language :: Python :: 3.7",
-        "Programming Language :: Python :: 3.8",
-    ],
-    url="https://github.com/cvjena/chillax",
-)

+ 0 - 20
tests/test_experiment.py

@@ -1,20 +0,0 @@
-import os
-
-import config as pcfg
-import pytest
-
-from chia import containers, helpers, instrumentation
-from chia.components import classifiers
-
-
-def test_experiment():
-    """This test runs the self-supervised experiment configuration once."""
-    from chillax import experiment_selfsupervised
-
-    example_config_files = ["examples/configuration.json"]
-
-    experiment_selfsupervised.main(example_config_files)
-
-
-if __name__ == "__main__":
-    test_experiment()