Eric Tröbs, 3 years ago
commit 59603d7eba

+ 8 - 0
docker-compose.yml

@@ -0,0 +1,8 @@
+version: '3'
+
+services:
+  web:
+    build: .
+    command: venv/bin/python app.py
+    ports:
+      - "5000:5000"

+ 31 - 0
models/moth_scanner/configuration.json

@@ -0,0 +1,31 @@
+{
+  "name": "Moth detector and classifier",
+  "description": "Moth scanner (detection and classification) of moths developed in the context of the AMMOD project.",
+  "supports": [
+    "labeled-bounding-boxes"
+  ],
+  "code": {
+    "module": "scanner",
+    "class": "Scanner"
+  },
+  "detector": {
+    "preprocess":{
+      "min_size": 800,
+      "scale": 0.1,
+      "sigma": 5.0
+    },
+    "threshold": {
+      "block_size_scale": 0.5
+    },
+    "postprocess":{
+      "dilate_iterations": 3,
+      "kernel_size": 5
+    }
+  },
+  "classifier": {
+    "model_type": "cvmodelz.InceptionV3",
+    "input_size": 299,
+    "weights": "classifier.npz",
+    "n_classes": 200
+  }
+}
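
The detector and classifier sub-objects map directly onto the constructor arguments of the two classes added below. A minimal sketch of loading the file by hand, assuming the scanner package is importable and the usual model folder layout (paths are assumptions):

    import json

    from scanner.detector import Detector
    from scanner.classifier import Classifier

    root = "models/moth_scanner"
    with open(f"{root}/configuration.json") as f:
        config = json.load(f)

    detector = Detector(config["detector"])
    classifier = Classifier(config["classifier"], root=root)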

+ 37 - 0
models/moth_scanner/scanner/__init__.py

@@ -0,0 +1,37 @@
+import cv2
+import numpy as np
+
+from pycs.interfaces.MediaFile import MediaFile
+from pycs.interfaces.MediaStorage import MediaStorage
+from pycs.interfaces.Pipeline import Pipeline as Interface
+
+from .detector import Detector
+from .classifier import Classifier
+
+class Scanner(Interface):
+    def __init__(self, root_folder: str, configuration: dict):
+        super().__init__(root_folder, configuration)
+        self.detector = Detector(configuration["detector"])
+        self.classifier = Classifier(configuration["classifier"], root=root_folder)
+
+    def close(self):
+        pass
+
+    def execute(self, storage: MediaStorage, file: MediaFile):
+
+        im = self.read_image(file.path)
+        # cv2.imread returns BGR images, so use the matching conversion flag
+        bw_im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
+
+        detections = self.detector(bw_im)
+
+        for bbox, info in detections:
+            # skip candidates rejected by the detector's heuristics
+            if not info.selected:
+                continue
+            label = self.classifier(bbox.crop(im, enlarge=True))
+            # bounding box attributes are relative to the image size
+            file.add_bounding_box(bbox.x0, bbox.y0, bbox.w, bbox.h, label=label)
+
+    def read_image(self, path: str, mode: int = cv2.IMREAD_COLOR) -> np.ndarray:
+        return cv2.imread(path, mode)
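
Outside of the pycs interfaces, the execute flow boils down to detect, filter, crop, classify. A rough sketch reusing the detector and classifier instances from the configuration example above (the image path is an assumption):

    import cv2

    im = cv2.imread("trap_image.jpg", cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

    for bbox, info in detector(gray):
        if not info.selected:
            continue
        label = classifier(bbox.crop(im, enlarge=True))
        print(bbox.x0, bbox.y0, bbox.w, bbox.h, label)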

+ 56 - 0
models/moth_scanner/scanner/classifier.py

@@ -0,0 +1,56 @@
+import numpy as np
+import typing as T
+
+from munch import munchify
+from pathlib import Path
+
+from cvmodelz.models import ModelFactory
+from chainercv import transforms as tr
+
+class Classifier(object):
+
+    def __init__(self, configuration: T.Dict, root: str):
+        super().__init__()
+
+        config = munchify(configuration)
+
+        model_type = config.model_type
+        n_classes = config.n_classes
+        weights = Path(root, config.weights).resolve()
+
+        self.input_size = config.input_size
+        self.backbone = ModelFactory.new(model_type)
+        self.backbone.load_for_inference(weights,
+                                         n_classes=n_classes,
+                                         path="model/",
+                                         strict=True,
+                                        )
+
+    def _transform(self, im: np.ndarray):
+        _prepare = self.backbone.meta.prepare_func
+        size = (self.input_size, self.input_size)
+
+        # resize while keeping the aspect ratio, then center-crop
+        # to the square input size expected by the backbone
+        im = _prepare(im, size=size, keep_ratio=True, swap_channels=False)
+        im = tr.center_crop(im, size)
+        return im
+
+
+    def __call__(self, im: np.ndarray) -> int:
+        assert im.ndim in (3, 4), \
+            "Classifier accepts only RGB images (3D input) or a batch of images (4D input)!"
+
+        if im.ndim == 3:
+            # expand a batch axis: HxWxC -> 1xHxWxC
+            im = im[None]
+
+        im = [self._transform(_im) for _im in im]
+        x = self.backbone.xp.array(im)
+        pred = self.backbone(x)
+        pred.to_cpu()
+
+        # argmax over the class axis; int() assumes a batch of size one
+        return int(np.argmax(pred.array, axis=1))
+
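
Classification is then a single call per crop. A short sketch, assuming the classifier instance from the configuration example and an HxWxC uint8 image (the file name is an assumption):

    import cv2

    crop = cv2.imread("moth_crop.png", cv2.IMREAD_COLOR)
    class_id = classifier(crop)  # one image in, one class ID out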

+ 213 - 0
models/moth_scanner/scanner/detector.py

@@ -0,0 +1,213 @@
+import cv2
+import numpy as np
+import typing as T
+
+from munch import munchify
+from collections import namedtuple
+from skimage import filters
+
+# the coordinates are relative!
+BBoxInfo = namedtuple("BBoxInfo", "area ratio mean std selected", defaults=[-1, -1, -1, -1, False])
+Detection = namedtuple("Detection", "bbox info")
+
+class BBox(namedtuple("BBox", "x0 y0 x1 y1")):
+    __slots__ = ()
+
+    @property
+    def w(self):
+        return abs(self.x1 - self.x0)
+
+    @property
+    def h(self):
+        return abs(self.y1 - self.y0)
+
+
+    @property
+    def area(self):
+        return self.h * self.w
+
+    @property
+    def ratio(self):
+        return min(self.h, self.w) / max(self.h, self.w)
+
+    def crop(self, im: np.ndarray, enlarge: bool = True):
+
+        x0, y0, x1, y1 = self
+        H, W, *_ = im.shape
+
+        # translate from relative coordinates to pixel
+        # coordinates for the given image
+
+        x0, x1 = int(x0 * W), int(x1 * W)
+        y0, y1 = int(y0 * H), int(y1 * H)
+
+        # enlarge to a square extent
+        if enlarge:
+            h, w = int(self.h * H), int(self.w * W)
+            size = max(h, w)
+            dw, dh = (size - w) / 2, (size - h) / 2
+            x0, x1 = max(int(x0 - dw), 0), int(x0 - dw + size)
+            y0, y1 = max(int(y0 - dh), 0), int(y0 - dh + size)
+
+        if im.ndim == 2:
+            return im[y0:y1, x0:x1]
+
+        elif im.ndim == 3:
+            return im[y0:y1, x0:x1, :]
+
+        else:
+            ValueError(f"Unsupported ndims: {im.ndims=}")
+
+
+class Detector(object):
+
+
+    def __init__(self, configuration: T.Dict[str, T.Dict]) -> None:
+        super().__init__()
+        config = munchify(configuration)
+
+        self.scale: float = config.preprocess.scale
+        self.min_size: int = config.preprocess.min_size
+        self.sigma: float = config.preprocess.sigma
+
+        self.block_size_scale: float = config.threshold.block_size_scale
+
+        self.dilate_iterations: int = config.postprocess.dilate_iterations
+        self.kernel_size: int = config.postprocess.kernel_size
+
+
+    def __call__(self, im: np.ndarray) -> T.List[Detection]:
+
+        _im = self.rescale(im)
+
+        im0 = self.preprocess(_im)
+        im1 = self.threshold(im0)
+        im2 = self.postprocess(im1)
+
+        bboxes = self.detect(im2)
+
+        return self.postprocess_boxes(_im, bboxes)
+
+
+    def detect(self, im: np.ndarray) -> T.List[BBox]:
+
+        contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+        contours = sorted(contours, key=cv2.contourArea, reverse=True)
+
+        return [_contour2bbox(c, im.shape) for c in contours]
+
+
+    def rescale(self, im: np.ndarray) -> np.ndarray:
+
+        H, W = im.shape
+        # scale down by self.scale, but keep the smaller side at
+        # min_size pixels or above, and never scale up (factor <= 1)
+        _scale = self.min_size / min(H, W)
+        scale = max(self.scale, min(1, _scale))
+        size = int(W * scale), int(H * scale)
+
+        return cv2.resize(im, dsize=size)
+
+
+    def preprocess(self, im: np.ndarray) -> np.ndarray:
+        res = filters.gaussian(im, sigma=self.sigma, preserve_range=True)
+        return res.astype(im.dtype)
+
+
+    def threshold(self, im: np.ndarray) -> np.ndarray:
+        block_size_scale = self.block_size_scale
+
+        # the local-threshold block size must be an odd integer
+        block_size = int(min(im.shape) * block_size_scale) // 2 * 2 + 1
+
+        thresh = filters.threshold_local(im,
+            block_size=block_size,
+            mode="constant",
+        )
+
+        max_value = 255
+        bin_im = ((im > thresh) * max_value).astype(np.uint8)
+        # invert so that dark regions become the white foreground
+        return max_value - bin_im
+
+
+    def postprocess(self, im: np.ndarray) -> np.ndarray:
+        kernel_size = self.kernel_size
+        iterations = self.dilate_iterations
+        kernel = np.ones((kernel_size, kernel_size), dtype=np.uint8)
+
+        im = cv2.morphologyEx(im, cv2.MORPH_OPEN, kernel)
+        im = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)
+
+        if iterations >= 1:
+            im = cv2.erode(im, kernel, iterations=iterations)
+            im = cv2.dilate(im, kernel, iterations=iterations)
+
+        return im
+
+
+    def postprocess_boxes(self, im: np.ndarray, bboxes: T.List[BBox]):
+
+        detections = [Detection(bbox, BBoxInfo()) for bbox in bboxes]
+
+        # integral images allow constant-time per-box mean and std
+        _im = im.astype(np.float64) / 255.
+        integral, integral_sq = cv2.integral2(_im)
+
+        # suppress heavily overlapping boxes (all scores are equal)
+        inds = cv2.dnn.NMSBoxes([[x0, y0, x1 - x0, y1 - y0] for (x0, y0, x1, y1) in bboxes],
+                                np.ones(len(bboxes), dtype=np.float32),
+                                score_threshold=0.99,
+                                nms_threshold=0.1,
+                                )
+
+        # calculate the BBoxInfo only for the kept boxes and update the detections
+        for i in np.asarray(inds).reshape(-1):
+            bbox, _ = detections[i]
+            mean, std, n = _im_mean_std(integral, integral_sq, bbox)
+            area, ratio = bbox.area, bbox.ratio
+            selected = self.is_selected(mean, std, ratio, area)
+            # use keyword arguments: the field order of BBoxInfo differs
+            # from the order in which the values are computed here
+            info = BBoxInfo(area=area, ratio=ratio, mean=mean, std=std, selected=selected)
+            detections[i] = Detection(bbox, info)
+
+        return detections
+
+    def is_selected(self, mean: float, std: float, ratio: float, area: float) -> bool:
+        # Caution, here are some magic numbers! All values are normalized:
+        # intensity std in [0, 1], side ratio in (0, 1], relative area in (0, 1].
+        return \
+            std >= 5e-2 and \
+            ratio >= 2.5e-1 and \
+            4e-4 <= area <= 1/9
+
+def _contour2bbox(contour: np.ndarray, shape: T.Tuple[int, int]) -> BBox:
+    """ Gets the maximal extent of a contour and translates it to a bounding box. """
+    x0, y0 = contour.min(axis=0)[0].astype(np.int32)
+    x1, y1 = contour.max(axis=0)[0].astype(np.int32)
+
+    h, w = shape
+    return BBox(x0/w, y0/h, x1/w, y1/h)
+
+
+def _im_mean_std(integral: np.ndarray,
+                 integral_sq: np.ndarray,
+                 bbox: T.Optional[BBox] = None
+                ) -> T.Tuple[float, float, int]:
+
+    h, w = integral.shape[0] - 1, integral.shape[1] - 1
+
+    if bbox is None:
+        arr_sum = integral[-1, -1]
+        arr_sum_sq = integral_sq[-1, -1]
+        N = h * w
+
+    else:
+        x0, y0, x1, y1 = bbox
+        x0, x1 = int(x0 * w), int(x1 * w)
+        y0, y1 = int(y0 * h), int(y1 * h)
+
+        # summed-area table identity:
+        # sum = I(y1, x1) + I(y0, x0) - I(y1, x0) - I(y0, x1)
+        A, B, C, D = (y0, x0), (y1, x0), (y0, x1), (y1, x1)
+        arr_sum = integral[D] + integral[A] - integral[B] - integral[C]
+        arr_sum_sq = integral_sq[D] + integral_sq[A] - integral_sq[B] - integral_sq[C]
+
+        N = (x1-x0) * (y1-y0)
+
+    arr_mean = arr_sum / N
+    arr_std  = np.sqrt((arr_sum_sq - (arr_sum**2) / N) / N)
+
+    return arr_mean, arr_std, N
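
End to end, the detector takes a grayscale image and returns Detection tuples with relative coordinates; only boxes whose info.selected flag survives the heuristics are meant to be kept. A minimal sketch with the values from configuration.json (the image path is an assumption):

    import cv2

    det = Detector({
        "preprocess": {"min_size": 800, "scale": 0.1, "sigma": 5.0},
        "threshold": {"block_size_scale": 0.5},
        "postprocess": {"dilate_iterations": 3, "kernel_size": 5},
    })

    gray = cv2.imread("trap_image.jpg", cv2.IMREAD_GRAYSCALE)
    for bbox, info in det(gray):
        if info.selected:
            print(f"({bbox.x0:.2f}, {bbox.y0:.2f}) {bbox.w:.2f}x{bbox.h:.2f} "
                  f"mean={info.mean:.3f} std={info.std:.3f}")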

+ 5 - 5
pycs/frontend/endpoints/data/UploadFile.py

@@ -34,11 +34,11 @@ class UploadFile(View):
         project = self.db.project(identifier)
 
         if project is None:
-            return abort(404)
+            return abort(404, "Project not found")
 
         # abort if external storage is used
         if project.external_data:
-            return abort(400)
+            return abort(400, "Project uses external data, but a file was uploaded")
 
         # get upload path and id
         self.data_folder = project.data_folder
@@ -50,14 +50,14 @@ class UploadFile(View):
 
         # abort if there is no file entry in uploaded data
         if 'file' not in files.keys():
-            return abort(400)
+            return abort(400, "No file entry was found in uploaded data")
 
         # detect file type
         try:
             ftype, frames, fps = tpool.execute(file_info,
                                                self.data_folder, self.file_id, self.file_extension)
-        except ValueError:
-            return abort(400)
+        except ValueError as e:
+            return abort(400, str(e))
 
         # add to project files
         with self.db:
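
For context: the second argument to Flask's abort is a description that werkzeug includes in the error response, so clients now see why a request failed instead of a bare status code. A minimal sketch (the route is made up):

    from flask import Flask, abort

    app = Flask(__name__)

    @app.route("/upload-check")
    def upload_check():
        # the description ends up in the body of the 400 response
        abort(400, "No file entry was found in uploaded data")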

+ 5 - 3
pycs/util/FileParser.py

@@ -14,12 +14,14 @@ def file_info(data_folder: str, file_name: str, file_ext: str):
     :return: file type, frame count, frames per second
     """
     # determine file type
-    if file_ext in ['.jpg', '.png']:
+    if file_ext.lower() in ['.jpg', '.png']:
         ftype = 'image'
-    elif file_ext in ['.mp4']:
+
+    elif file_ext.lower() in ['.mp4']:
         ftype = 'video'
+
     else:
-        raise ValueError
+        raise ValueError(f"Unsupported file extension: {file_ext}!")
 
     # determine frames and fps for video files
     if ftype == 'image':
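
With this change the extension check is case-insensitive and the raised error names the offending extension. A quick sketch of both paths (folder and file names are assumptions):

    from pycs.util.FileParser import file_info

    # upper-case extensions are now accepted
    ftype, frames, fps = file_info("data", "IMG_0001", ".JPG")

    try:
        file_info("data", "notes", ".txt")
    except ValueError as e:
        print(e)  # Unsupported file extension: .txt!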

+ 13 - 0
requirements.txt

@@ -0,0 +1,13 @@
+numpy
+opencv-python
+Pillow
+scipy
+eventlet
+flask
+python-socketio
+munch
+scikit-image
+
+chainer~=7.8
+chainer-addons~=0.10
+cvmodelz~=0.1