Pipeline.py

from os import path
from typing import List
from urllib.request import urlretrieve

import cv2

from pycs import app
from pycs.interfaces.MediaFile import MediaFile
from pycs.interfaces.MediaStorage import MediaStorage
from pycs.interfaces.Pipeline import Pipeline as Interface


class Pipeline(Interface):
    URL = 'https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml'

    def __init__(self, root_folder, distribution):
        app.logger.debug('hcffdv1 init')

        # get path to xml file
        xml_file = path.join(root_folder, 'haarcascade_frontalface_default.xml')

        # download
        if not path.exists(xml_file):
            urlretrieve(self.URL, xml_file)

        # load
        self.face_cascade = cv2.CascadeClassifier(xml_file)

    def close(self):
        app.logger.debug('hcffdv1 close')

    def collections(self) -> List[dict]:
        return [
            self.create_collection('face', 'face detected', autoselect=True),
            self.create_collection('none', 'no face detected')
        ]

    def execute(self, storage: MediaStorage, file: MediaFile):
        app.logger.debug('hcffdv1 execute')

        # load file and analyze frames
        found = False

        if file.type == 'image':
            found = self.__find(file, cv2.imread(file.path))
        else:
            video = cv2.VideoCapture(file.path)
            index = 0

            ret, image = video.read()
            while ret:
                if self.__find(file, image, index):
                    found = True

                ret, image = video.read()
                index += 1

            video.release()

        # set file collection
        if found:
            file.set_collection('face')
        else:
            file.set_collection('none')

    def __find(self, file: MediaFile, image, frame=None):
        # convert to grayscale, scale down
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        height, width = gray.shape
        scale_factor = min(2048 / width, 2048 / height, 1.0)
        scale_height, scale_width = int(height * scale_factor), int(width * scale_factor)
        scaled = cv2.resize(gray, (scale_width, scale_height))

        # detect faces
        faces = self.face_cascade.detectMultiScale(
            scaled,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(192, 192)
        )

        # add faces to results (coordinates relative to the scaled image size)
        for x, y, w, h in faces:
            file.add_bounding_box(x / scale_width,
                                  y / scale_height,
                                  w / scale_width,
                                  h / scale_height,
                                  frame=frame)

        return len(faces) > 0
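

# --- Usage sketch (illustration only; not part of the pycs pipeline API) ---
# Runs the same Haar cascade directly on a single image, bypassing the
# MediaFile / MediaStorage plumbing that pycs normally provides. The cascade
# file is cached next to this script; the image path comes from the command
# line, and 'photo.jpg' is only a placeholder default.
if __name__ == '__main__':
    import sys

    xml_path = path.join(path.dirname(__file__), 'haarcascade_frontalface_default.xml')
    if not path.exists(xml_path):
        urlretrieve(Pipeline.URL, xml_path)

    cascade = cv2.CascadeClassifier(xml_path)
    image = cv2.imread(sys.argv[1] if len(sys.argv) > 1 else 'photo.jpg')
    if image is None:
        sys.exit('could not read input image')

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(64, 64))
    print(f'{len(faces)} face(s) detected')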