renderer.py
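"""ASCII renderer for node/GPU monitoring information.

Formats a list of per-node info dictionaries as boxed text blocks
(a CPU/memory header plus one memory bar per GPU), optionally laid
out in several columns.
"""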

from math import ceil

__all__ = ["Renderer"]

STANDARD_DICT = {
    "Memory": "Memory",
    "#Proc": "#Proc",
    "Temp": "Temp",
    "Util": "Util",
}

SHORTENED_DICT = {
    "Memory": "Mem",
    "#Proc": "#Pr",
    "Temp": "T",
    "Util": "Ut",
}


def build_line(fill_char, length):
    # Full-width line of `fill_char` enclosed in "|" borders, e.g. "|-----|".
    return "|" + fill_char * (length - 2) + "|"


def fill_line(line, length, fill_char=" "):
    # Pad `line` with `fill_char` up to `length` characters and close it with "|".
    return line + fill_char * (length - len(line) - 1) + "|"


class Renderer:
    def __init__(
        self,
        progress_bar_width=50,
        columns=1,
        gpu_inner_spacings=True,
        gpu_outer_spacings=True,
        node_names=None,
        display_power=True,
        display_users=False,
        dict_type="default",
        available_gpus_only=False,
    ) -> None:
        assert dict_type in ("default", "shortened")
        self.progress_bar_width = progress_bar_width
        self.columns = columns
        self.gpu_inner_spacings = gpu_inner_spacings
        self.gpu_outer_spacings = gpu_outer_spacings
        self.node_names = node_names
        self.display_power = display_power
        self.display_users = display_users
        self.dict_type = dict_type
        self.available_gpus_only = available_gpus_only
        if dict_type == "default":
            self.act_dict = STANDARD_DICT
        else:
            self.act_dict = SHORTENED_DICT

    def render_info_dict(self, info_dict):
        line_blocks = []
        first_line = ""  # Avoid a NameError when no node produces output.
        for node_dict in info_dict:
            if self.node_names is None or node_dict["name"] in self.node_names:
                lines = self.render_node(node_dict)
                if lines:
                    line_blocks.append(lines)
                    first_line = build_line("=", len(lines[-1]))
        final_lines = []
        n_rows = ceil(len(line_blocks) / self.columns)
        calc_index = lambda row, col, n_rows, n_cols: row * n_cols + col
        # Format rows and columns
        for row in range(n_rows):
            lines = []
            max_size = -1
            # Find max size of the blocks in the same row:
            for col in range(self.columns):
                idx = calc_index(row, col, n_rows, self.columns)
                if idx < len(line_blocks):
                    if len(line_blocks[idx]) > max_size:
                        max_size = len(line_blocks[idx])
            for col in range(self.columns):
                idx = calc_index(row, col, n_rows, self.columns)
                if idx < len(line_blocks):
                    if len(lines) == 0:
                        lines.extend(
                            self.expand_rendered_block_to_size(
                                line_blocks[idx], max_size
                            )
                        )
                    else:
                        for i, line in enumerate(
                            self.expand_rendered_block_to_size(line_blocks[idx], max_size)
                        ):
                            lines[i] += line
            final_lines.extend(lines)
        final_lines.insert(0, first_line * min(self.columns, len(line_blocks)))
        # lines.append("=" * len(lines[-1]))
        return "\n".join(final_lines)

    def render_node(self, node_dict):
        name = node_dict["name"]
        mem_used = node_dict["latest_info"]["used_memory_mb"]
        mem_total = node_dict["total_memory_mb"]
        utilization = node_dict["latest_info"]["cpu_utilization"]
        temp = node_dict["latest_info"]["temperature"]
        head_line = "|- Node: " + name + " "
        info_line = (
            f"| CPU: {utilization:>4.1f}% "
            f"{self.act_dict['Memory']}: {mem_used:>6}/{mem_total:<6} MB "
            f"{self.act_dict['Temp']}: {temp:>3}°C"
        )
        lines = []
        line_len = -1
        for gpu_dict in node_dict["gpus"]:
            new_lines = self.get_rendered_gpu_lines(gpu_dict)
            if line_len == -1:
                line_len = len(new_lines[-1])
            if not self.available_gpus_only or (
                len(gpu_dict["running_processes"]) == 0
                and gpu_dict["latest_info"]["used_memory_mb"] < 100
            ):
                lines.extend(new_lines)
        # If no GPUs are available for this node, skip the GPU display
        if self.available_gpus_only and len(lines) == 0:
            return None
        head_line = fill_line(head_line, line_len, fill_char="-")
        info_line = fill_line(info_line, line_len)
        pad_line = build_line("-", line_len)
        pad_line_empty = build_line(" ", line_len)
        if self.gpu_outer_spacings:
            lines.append(pad_line_empty)
        lines.append(build_line("=", line_len))
        if self.gpu_outer_spacings:
            lines.insert(0, pad_line_empty)
        lines.insert(0, pad_line)
        if self.gpu_outer_spacings:
            lines.insert(0, pad_line_empty)
        lines.insert(0, info_line)
        if self.gpu_outer_spacings:
            lines.insert(0, pad_line_empty)
        lines.insert(0, head_line)
        return lines

    def get_rendered_gpu_lines(self, gpu_dict):
        gpu_type = self.maybe_shorten_gpu_name(gpu_dict["type"])
        index = gpu_dict["index"]
        mem_used = gpu_dict["latest_info"]["used_memory_mb"]
        mem_total = gpu_dict["total_memory_mb"]
        utilization = gpu_dict["latest_info"]["utilization"]
        temp = gpu_dict["latest_info"]["temperature"]
        n_processes = len(gpu_dict["running_processes"])
        power = gpu_dict["latest_info"]["power_draw"]
        # Number of filled characters in the memory progress bar.
        mem_used_chars = int(self.progress_bar_width * mem_used / mem_total)
        rest_mem = self.progress_bar_width - mem_used_chars
        line_util = (
            "| ["
            + "=" * mem_used_chars
            + " " * rest_mem
            + "]"
            + f"{mem_used:>6}/{mem_total:<6} MB, {self.act_dict['Util']}: {int(utilization):>3}% |"
        )
        line_meta = (
            f"| GPU #{index} ({gpu_type}): "
            + f"{self.act_dict['#Proc']}: {n_processes} "
            + f"{self.act_dict['Temp']}: {temp:>3}°C "
            + (f"Pow: {int(power):>3} W" if self.display_power else "")
        )
        line_meta = fill_line(line_meta, len(line_util))
        empty_line = build_line(" ", len(line_meta))
        lines = [line_meta, line_util]
        if self.display_users:
            lines.append(
                self.get_rendered_users_line(gpu_dict["running_processes"], len(line_meta))
            )
        elif self.gpu_inner_spacings:
            lines.append(empty_line)
        if self.gpu_outer_spacings:
            lines = [empty_line] + lines
        return lines

    def get_rendered_users_line(self, processes_list, line_len):
        user_names = [p_dict["user_name"] for p_dict in processes_list]
        line = "| Users: " + ", ".join(user_names)
        line = fill_line(line, line_len)
        return line

    def expand_rendered_block_to_size(self, block_lines, max_size):
        # Pad the block with empty box lines (inserted before its last line)
        # so that all blocks in the same row have the same height.
        buffer_line = "|" + " " * (len(block_lines[-1]) - 2) + "|"
        return block_lines[:-1] + [buffer_line] * (max_size - len(block_lines)) + block_lines[-1:]

    def maybe_shorten_gpu_name(self, name):
        if self.dict_type == "shortened":
            name = name.replace("GeForce", "").strip()
            if len(name) > 13:
                name = name[:10] + "..."
        return name
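

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the shape of
# `info_dict` below is inferred from the fields accessed in render_node() and
# get_rendered_gpu_lines(); the real producer of this structure may differ.
if __name__ == "__main__":
    example_info_dict = [
        {
            "name": "node01",
            "total_memory_mb": 64000,
            "latest_info": {
                "used_memory_mb": 12000,
                "cpu_utilization": 37.5,
                "temperature": 55,
            },
            "gpus": [
                {
                    "index": 0,
                    "type": "GeForce RTX 3090",
                    "total_memory_mb": 24576,
                    "latest_info": {
                        "used_memory_mb": 512,
                        "utilization": 3,
                        "temperature": 41,
                        "power_draw": 52.0,
                    },
                    "running_processes": [],
                },
            ],
        },
    ]
    renderer = Renderer(progress_bar_width=30, columns=1, dict_type="shortened")
    print(renderer.render_info_dict(example_info_dict))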