renderer.py

from math import ceil

__all__ = ["Renderer"]

# Full labels for normal terminal widths.
STANDARD_DICT = {
    "Memory": "Memory",
    "#Proc": "#Proc",
    "Temp": "Temp",
    "Util": "Util",
}

# Shortened labels for narrow displays.
SHORTENED_DICT = {
    "Memory": "Mem",
    "#Proc": "#Pr",
    "Temp": "T",
    "Util": "Ut",
}


def build_line(fill_char, length):
    """Build a closed border line of the given total length, e.g. build_line("-", 6) -> "|----|"."""
    return "|" + fill_char * (length - 2) + "|"


def fill_line(line, length, fill_char=" "):
    """Pad a line that already starts with "|" to the given total length and close it with "|"."""
    return line + fill_char * (length - len(line) - 1) + "|"


class Renderer:
    """Renders an info_dict describing cluster nodes and their GPUs as fixed-width text blocks."""

    def __init__(
        self,
        progress_bar_width=50,
        columns=1,
        gpu_inner_spacings=True,
        gpu_outer_spacings=True,
        node_names=None,
        display_power=True,
        display_users=False,
        dict_type="default",
        available_gpus_only=False,
    ) -> None:
        assert dict_type in ("default", "shortened")
        self.progress_bar_width = progress_bar_width
        self.columns = columns
        self.gpu_inner_spacings = gpu_inner_spacings
        self.gpu_outer_spacings = gpu_outer_spacings
        self.node_names = node_names
        self.display_power = display_power
        self.display_users = display_users
        self.dict_type = dict_type
        self.available_gpus_only = available_gpus_only
        # Select the label set matching the requested dict type.
        if dict_type == "default":
            self.act_dict = STANDARD_DICT
        else:
            self.act_dict = SHORTENED_DICT

    def render_info_dict(self, info_dict):
        """Render all selected nodes into a single multi-column string."""
        line_blocks = []
        first_line = ""
        for node_dict in info_dict:
            if self.node_names is None or node_dict["name"] in self.node_names:
                lines = self.render_node(node_dict)
                if lines:
                    line_blocks.append(lines)
                    first_line = build_line("=", len(lines[-1]))
        final_lines = []
        n_rows = ceil(len(line_blocks) / self.columns)
        # Format rows and columns.
        for row in range(n_rows):
            lines = []
            max_size = -1
            # Find the tallest block in this row so shorter blocks can be padded to match.
            for col in range(self.columns):
                if col * n_rows + row < len(line_blocks):
                    if len(line_blocks[col * n_rows + row]) > max_size:
                        max_size = len(line_blocks[col * n_rows + row])
            # Concatenate the blocks of this row line by line, column after column.
            for col in range(self.columns):
                if col * n_rows + row < len(line_blocks):
                    expanded = self.expand_rendered_block_to_size(
                        line_blocks[col * n_rows + row], max_size
                    )
                    if len(lines) == 0:
                        lines.extend(expanded)
                    else:
                        for i, line in enumerate(expanded):
                            lines[i] += line
            final_lines.extend(lines)
        # Top border spanning all rendered columns.
        final_lines.insert(0, first_line * min(self.columns, len(line_blocks)))
        return "\n".join(final_lines)

    def render_node(self, node_dict):
        """Render a single node (CPU header plus one block per GPU) as a list of lines."""
        name = node_dict["name"]
        mem_used = node_dict["latest_info"]["used_memory_mb"]
        mem_total = node_dict["total_memory_mb"]
        utilization = node_dict["latest_info"]["cpu_utilization"]
        temp = node_dict["latest_info"]["temperature"]
        head_line = "|- Node: " + name + " "
        info_line = (
            f"| CPU: {utilization:>4.1f}% "
            f"{self.act_dict['Memory']}: {mem_used:>6}/{mem_total:<6} MB "
            f"{self.act_dict['Temp']}: {temp:>3}°C"
        )
        lines = []
        line_len = -1
        for gpu_dict in node_dict["gpus"]:
            new_lines = self.get_rendered_gpu_lines(gpu_dict)
            if line_len == -1:
                line_len = len(new_lines[-1])
            # Keep the GPU block unless only available GPUs are requested and this one is busy.
            if not self.available_gpus_only or (
                len(gpu_dict["running_processes"]) == 0
                and gpu_dict["latest_info"]["used_memory_mb"] < 100
            ):
                lines.extend(new_lines)
        # If no GPUs are available for this node, skip the node entirely.
        if self.available_gpus_only and len(lines) == 0:
            return None
        head_line = fill_line(head_line, line_len, fill_char="-")
        info_line = fill_line(info_line, line_len)
        pad_line = build_line("-", line_len)
        pad_line_empty = build_line(" ", line_len)
        if self.gpu_outer_spacings:
            lines.append(pad_line_empty)
        lines.append(build_line("=", line_len))
        if self.gpu_outer_spacings:
            lines.insert(0, pad_line_empty)
        lines.insert(0, pad_line)
        if self.gpu_outer_spacings:
            lines.insert(0, pad_line_empty)
        lines.insert(0, info_line)
        if self.gpu_outer_spacings:
            lines.insert(0, pad_line_empty)
        lines.insert(0, head_line)
        return lines

    def get_rendered_gpu_lines(self, gpu_dict):
        """Render a single GPU as a metadata line plus a memory/utilization bar line."""
        gpu_type = self.maybe_shorten_gpu_name(gpu_dict["type"])
        index = gpu_dict["index"]
        mem_used = gpu_dict["latest_info"]["used_memory_mb"]
        mem_total = gpu_dict["total_memory_mb"]
        utilization = gpu_dict["latest_info"]["utilization"]
        temp = gpu_dict["latest_info"]["temperature"]
        n_processes = len(gpu_dict["running_processes"])
        power = gpu_dict["latest_info"]["power_draw"]
        # Number of filled cells in the memory progress bar.
        mem_used_cells = int(self.progress_bar_width * mem_used / mem_total)
        rest_mem = self.progress_bar_width - mem_used_cells
        line_util = (
            "| ["
            + "=" * mem_used_cells
            + " " * rest_mem
            + "]"
            + f"{mem_used:>6}/{mem_total:<6} MB, {self.act_dict['Util']}: {int(utilization):>3}% |"
        )
        line_meta = (
            f"| GPU #{index} ({gpu_type}): "
            + f"{self.act_dict['#Proc']}: {n_processes} "
            + f"{self.act_dict['Temp']}: {temp:>3}°C "
            + (f"Pow: {int(power):>3} W" if self.display_power else "")
        )
        line_meta = fill_line(line_meta, len(line_util))
        empty_line = build_line(" ", len(line_meta))
        lines = [line_meta, line_util]
        if self.display_users:
            lines.append(
                self.get_rendered_users_line(gpu_dict["running_processes"], len(line_meta))
            )
        elif self.gpu_inner_spacings:
            lines.append(empty_line)
        if self.gpu_outer_spacings:
            lines = [empty_line] + lines
        return lines

    def get_rendered_users_line(self, processes_list, line_len):
        """Render a line listing the user names of the processes running on a GPU."""
        user_names = [p_dict["user_name"] for p_dict in processes_list]
        line = "| Users: " + ", ".join(user_names)
        line = fill_line(line, line_len)
        return line

    def expand_rendered_block_to_size(self, block_lines, max_size):
        """Pad a block with blank rows (before its closing border line) until it spans max_size lines."""
        buffer_line = "|" + " " * (len(block_lines[-1]) - 2) + "|"
        return block_lines[:-1] + [buffer_line] * (max_size - len(block_lines)) + block_lines[-1:]

    def maybe_shorten_gpu_name(self, name):
        """Shorten long GPU names when the shortened label set is active."""
        if self.dict_type == "shortened":
            name = name.replace("GeForce", "").strip()
            if len(name) > 13:
                name = name[:10] + "..."
        return name
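

# Usage sketch (illustrative only): the shape of the info_dict below is inferred from the
# key accesses in render_info_dict/render_node/get_rendered_gpu_lines; the node name and
# the numeric values are made-up placeholders, and a real info_dict would come from
# whatever monitoring backend feeds this renderer.
if __name__ == "__main__":
    example_info_dict = [
        {
            "name": "node-01",  # hypothetical node name
            "total_memory_mb": 128000,
            "latest_info": {
                "used_memory_mb": 42000,
                "cpu_utilization": 17.5,
                "temperature": 48,
            },
            "gpus": [
                {
                    "type": "GeForce RTX 3090",
                    "index": 0,
                    "total_memory_mb": 24576,
                    "latest_info": {
                        "used_memory_mb": 1024,
                        "utilization": 5,
                        "temperature": 41,
                        "power_draw": 68.0,
                    },
                    "running_processes": [{"user_name": "alice"}],
                },
            ],
        },
    ]
    renderer = Renderer(progress_bar_width=30, dict_type="shortened", display_users=True)
    print(renderer.render_info_dict(example_info_dict))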