6
0

PipelineManager.py 3.2 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697
  1. from os import getcwd
  2. from os import path
  3. from eventlet import tpool
  4. from pycs.pipeline.Job import Job
  5. from pycs.pipeline.tf1.pipeline import Pipeline as TF1Pipeline
  6. from pycs.projects.Project import Project
  7. class PipelineManager:
  8. def __init__(self, project: Project):
  9. self.project = project
  10. self.pipeline = tpool.execute(self.__load_pipeline, project['pipeline']['model-distribution'])
  11. def __enter__(self):
  12. return self
  13. def __exit__(self, type, value, traceback):
  14. self.pipeline.close()
  15. def run(self, media_file):
  16. # create job list
  17. # TODO update job progress
  18. job = Job('detect-faces', self.project['id'], media_file)
  19. result = tpool.execute(lambda p, j: p.execute(j), self.pipeline, job)
  20. # remove existing pipeline predictions from media_fle
  21. media_file.remove_pipeline_results()
  22. # add new predictions
  23. for prediction in result.predictions:
  24. media_file.add_result(prediction, origin='pipeline')
  25. def __load_pipeline(self, pipeline_identifier):
  26. model_distribution = self.project.parent.parent['models'][pipeline_identifier]
  27. if model_distribution['mode'] == 'tf1':
  28. model_root = path.join(getcwd(), 'models', model_distribution['name'])
  29. pipeline = TF1Pipeline()
  30. pipeline.load(model_root, model_distribution['pipeline'])
  31. return pipeline
# NOTE(review): the two string literals below are superseded drafts of a batch
# __update routine, kept as commented-out code. Prefer deleting them outright —
# version control preserves the history, and dead code like this drifts out of
# sync with the live implementation (e.g. the "media_fle"-era API has changed).
'''
def __update(self, data):
    # get current project path
    opened_projects = list(filter(lambda x: x['status'] == 'open', data))
    if len(opened_projects) == 0:
        return

    current_project = opened_projects[0]

    # find images to predict
    if 'data' not in current_project.keys() or len(current_project['data']) == 0:
        return

    # load pipeline
    pipeline = tpool.execute(self.__load_pipeline, current_project['pipeline']['model-distribution'])

    # create job list
    for d in current_project['data']:
        print('keys:', d.keys())
        if 'result' not in d.keys():
            # TODO update job progress
            job = Job('detect-faces', current_project['id'], d)
            result = tpool.execute(lambda p, j: p.execute(j), pipeline, job)
            d['result'] = result.predictions

    # close pipeline
    pipeline.close()
'''
'''
def __update(self, data):
    for current_project in data:
        print('>>>>>>>>>>')
        # find images to predict
        if 'data' not in current_project.keys() or len(current_project['data']) == 0:
            return

        # load pipeline
        pipeline = tpool.execute(self.__load_pipeline, current_project['pipeline']['model-distribution'])

        # create job list
        for d in current_project['data']:
            print('keys:', d.keys())
            if 'result' not in d.keys():
                # TODO update job progress
                job = Job('detect-faces', current_project['id'], d)
                result = tpool.execute(lambda p, j: p.execute(j), pipeline, job)
                d['result'] = result.predictions

        # close pipeline
        pipeline.close()
'''