Pipeline.py 2.7 KB

from os import path
from typing import List
from urllib.request import urlretrieve

import cv2

from pycs.interfaces.MediaFile import MediaFile
from pycs.interfaces.MediaStorage import MediaStorage
from pycs.interfaces.Pipeline import Pipeline as Interface


class Pipeline(Interface):
    """Frontal face detection pipeline based on OpenCV's Haar cascade classifier."""

    URL = 'https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml'

    def __init__(self, root_folder, distribution):
        print('hcffdv1 init')

        # get path to xml file
        xml_file = path.join(root_folder, 'haarcascade_frontalface_default.xml')

        # download the cascade definition if it is not cached yet
        if not path.exists(xml_file):
            urlretrieve(self.URL, xml_file)

        # load the classifier
        self.face_cascade = cv2.CascadeClassifier(xml_file)

    def close(self):
        print('hcffdv1 close')

    def collections(self) -> List[dict]:
        return [
            self.create_collection('face', 'face detected', autoselect=True),
            self.create_collection('none', 'no face detected')
        ]

    def execute(self, storage: MediaStorage, file: MediaFile):
        print('hcffdv1 execute')

        # load file and analyze frames
        found = False

        if file.type == 'image':
            found = self.__find(file, cv2.imread(file.path))
        else:
            video = cv2.VideoCapture(file.path)
            index = 0

            ret, image = video.read()
            while ret:
                if self.__find(file, image, index):
                    found = True

                ret, image = video.read()
                index += 1

            video.release()

        # set file collection
        if found:
            file.set_collection('face')
        else:
            file.set_collection('none')

    def __find(self, file: MediaFile, image, frame=None):
        # convert to grayscale and scale down to at most 2048 px per side
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        height, width = gray.shape
        scale_factor = min(2048 / width, 2048 / height, 1.0)
        scale_height, scale_width = int(height * scale_factor), int(width * scale_factor)
        scaled = cv2.resize(gray, (scale_width, scale_height))

        # detect faces
        faces = self.face_cascade.detectMultiScale(
            scaled,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(192, 192)
        )

        # add faces to results (coordinates are normalized to the [0, 1] range)
        for x, y, w, h in faces:
            file.add_bounding_box(x / scale_width,
                                  y / scale_height,
                                  w / scale_width,
                                  h / scale_height,
                                  frame=frame)

        return len(faces) > 0
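
The detection steps can also be exercised outside the pycs interfaces. The following is a minimal standalone sketch, not part of Pipeline.py: it repeats only the OpenCV calls used above, and the input image path 'example.jpg' is a placeholder.

# standalone sketch of the same Haar cascade detection (hypothetical input path)
from urllib.request import urlretrieve
import cv2

URL = 'https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml'
urlretrieve(URL, 'haarcascade_frontalface_default.xml')
cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

image = cv2.imread('example.jpg')  # placeholder image, assumed to exist
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# same downscaling rule as Pipeline.__find: cap each side at 2048 px
height, width = gray.shape
scale = min(2048 / width, 2048 / height, 1.0)
scaled = cv2.resize(gray, (int(width * scale), int(height * scale)))

faces = cascade.detectMultiScale(scaled, scaleFactor=1.1, minNeighbors=5, minSize=(192, 192))
print(f'{len(faces)} face(s) detected')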