# renderer.py
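"""ASCII renderer for node/GPU monitoring info dicts.

Builds one bordered text block per node (a CPU/memory summary plus one
sub-block per GPU) and arranges the blocks in a configurable number of
columns.
"""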

from math import ceil

__all__ = ["Renderer"]

STANDARD_DICT = {
    "Memory": "Memory",
    "#Proc": "#Proc",
    "Temp": "Temp",
    "Util": "Util",
}

SHORTENED_DICT = {
    "Memory": "Mem",
    "#Proc": "#Pr",
    "Temp": "T",
    "Util": "Ut",
}

def build_line(fill_char, length):
    # A full-width line of `fill_char` between two "|" borders.
    return "|" + fill_char * (length - 2) + "|"


def fill_line(line, length, fill_char=" "):
    # Pad `line` with `fill_char` up to `length` and close it with a "|" border.
    return line + fill_char * (length - len(line) - 1) + "|"

class Renderer:
    def __init__(
        self,
        progress_bar_width=50,
        columns=1,
        gpu_inner_spacings=True,
        gpu_outer_spacings=True,
        node_names=None,
        display_power=True,
        display_users=False,
        dict_type="default",
        available_gpus_only=False,
    ) -> None:
        assert dict_type in ("default", "shortened")
        self.progress_bar_width = progress_bar_width
        self.columns = columns
        self.gpu_inner_spacings = gpu_inner_spacings
        self.gpu_outer_spacings = gpu_outer_spacings
        self.node_names = node_names
        self.display_power = display_power
        self.display_users = display_users
        self.dict_type = dict_type
        self.available_gpus_only = available_gpus_only
        self.act_dict = STANDARD_DICT if dict_type == "default" else SHORTENED_DICT
    def render_info_dict(self, info_dict):
        line_blocks = []
        first_line = ""
        for node_dict in info_dict:
            if self.node_names is None or node_dict["name"] in self.node_names:
                lines = self.render_node(node_dict)
                if lines:
                    line_blocks.append(lines)
                    first_line = build_line("=", len(lines[-1]))
        final_lines = []
        n_rows = ceil(len(line_blocks) / self.columns)

        def calc_index(row, col, n_cols):
            return row * n_cols + col

        # Format rows and columns
        for row in range(n_rows):
            lines = []
            max_size = -1
            # Find max size of the blocks in the same row:
            for col in range(self.columns):
                idx = calc_index(row, col, self.columns)
                if idx < len(line_blocks) and len(line_blocks[idx]) > max_size:
                    max_size = len(line_blocks[idx])
            # Pad every block in the row to that size and join them side by side.
            for col in range(self.columns):
                idx = calc_index(row, col, self.columns)
                if idx < len(line_blocks):
                    block = self.expand_rendered_block_to_size(line_blocks[idx], max_size)
                    if len(lines) == 0:
                        lines.extend(block)
                    else:
                        for i, line in enumerate(block):
                            lines[i] += line
            final_lines.extend(lines)
        # Top border spanning all rendered columns.
        final_lines.insert(0, first_line * min(self.columns, len(line_blocks)))
        return "\n".join(final_lines)
    def render_node(self, node_dict):
        name = node_dict["name"]
        mem_used = node_dict["latest_info"]["used_memory_mb"]
        mem_total = node_dict["total_memory_mb"]
        utilization = node_dict["latest_info"]["cpu_utilization"]
        temp = node_dict["latest_info"]["temperature"]
        head_line = "|- Node: " + name + " "
        info_line = (
            f"| CPU: {utilization:>4.1f}% "
            f"{self.act_dict['Memory']}: {mem_used:>6}/{mem_total:<6} MB "
            f"{self.act_dict['Temp']}: {temp:>3}°C"
        )
        lines = []
        line_len = -1
        for gpu_dict in node_dict["gpus"]:
            new_lines = self.get_rendered_gpu_lines(gpu_dict)
            if line_len == -1:
                line_len = len(new_lines[-1])
            # Keep the GPU block unless only available GPUs should be shown
            # and this GPU is busy (running processes or >= 100 MB in use).
            if not self.available_gpus_only or (
                len(gpu_dict["running_processes"]) == 0
                and gpu_dict["latest_info"]["used_memory_mb"] < 100
            ):
                lines.extend(new_lines)
        # If no GPUs are available for this node, skip the GPU display
        if self.available_gpus_only and len(lines) == 0:
            return None
        head_line = fill_line(head_line, line_len, fill_char="-")
        info_line = fill_line(info_line, line_len)
        pad_line = build_line("-", line_len)
        pad_line_empty = build_line(" ", line_len)
        if self.gpu_outer_spacings:
            lines.append(pad_line_empty)
        lines.append(build_line("=", line_len))
        if self.gpu_outer_spacings:
            lines.insert(0, pad_line_empty)
        lines.insert(0, pad_line)
        if self.gpu_outer_spacings:
            lines.insert(0, pad_line_empty)
        lines.insert(0, info_line)
        if self.gpu_outer_spacings:
            lines.insert(0, pad_line_empty)
        lines.insert(0, head_line)
        return lines
    def get_rendered_gpu_lines(self, gpu_dict):
        gpu_type = self.maybe_shorten_gpu_name(gpu_dict["type"])
        index = gpu_dict["index"]
        mem_used = gpu_dict["latest_info"]["used_memory_mb"]
        mem_total = gpu_dict["total_memory_mb"]
        utilization = gpu_dict["latest_info"]["utilization"]
        temp = gpu_dict["latest_info"]["temperature"]
        n_processes = len(gpu_dict["running_processes"])
        power = gpu_dict["latest_info"]["power_draw"]
        # Number of filled cells in the memory progress bar.
        mem_used_cells = int(self.progress_bar_width * mem_used / mem_total)
        rest_mem = self.progress_bar_width - mem_used_cells
        line_util = (
            "| ["
            + "=" * mem_used_cells
            + " " * rest_mem
            + "]"
            + f"{mem_used:>6}/{mem_total:<6} MB, {self.act_dict['Util']}: {int(utilization):>3}% |"
        )
        line_meta = (
            "| "
            f"GPU #{index} ({gpu_type}): "
            f"{self.act_dict['#Proc']}: {n_processes} "
            f"{self.act_dict['Temp']}: {temp:>3}°C "
            + (f"Pow: {int(power):>3} W" if self.display_power else "")
        )
        line_meta = fill_line(line_meta, len(line_util))
        empty_line = build_line(" ", len(line_meta))
        lines = [line_meta, line_util]
        if self.display_users:
            lines.append(
                self.get_rendered_users_line(gpu_dict["running_processes"], len(line_meta))
            )
        elif self.gpu_inner_spacings:
            lines.append(empty_line)
        if self.gpu_outer_spacings:
            lines = [empty_line] + lines
        return lines
    def get_rendered_users_line(self, processes_list, line_len):
        user_names = [p_dict["user_name"] for p_dict in processes_list]
        line = "| Users: " + ", ".join(user_names)
        return fill_line(line, line_len)
    def expand_rendered_block_to_size(self, block_lines, max_size):
        # Pad a block with blank bordered lines (inserted before its closing
        # line) so that every block in a row has the same number of lines.
        buffer_line = "|" + " " * (len(block_lines[-1]) - 2) + "|"
        padding = [buffer_line] * (max_size - len(block_lines))
        return block_lines[:-1] + padding + block_lines[-1:]
    def maybe_shorten_gpu_name(self, name):
        if self.dict_type == "shortened":
            name = name.replace("GeForce", "").strip()
            # Truncate very long model names to keep the meta line compact.
            if len(name) > 13:
                name = name[:10] + "..."
        return name
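

# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of the info dict shape the Renderer expects, inferred from
# the keys accessed above; the node and GPU values below are made up.
if __name__ == "__main__":
    example_info = [
        {
            "name": "node01",
            "total_memory_mb": 64000,
            "latest_info": {
                "used_memory_mb": 12000,
                "cpu_utilization": 37.5,
                "temperature": 48,
            },
            "gpus": [
                {
                    "index": 0,
                    "type": "NVIDIA GeForce RTX 3090",
                    "total_memory_mb": 24576,
                    "latest_info": {
                        "used_memory_mb": 1024,
                        "utilization": 12,
                        "temperature": 41,
                        "power_draw": 75,
                    },
                    "running_processes": [{"user_name": "alice"}],
                },
            ],
        },
    ]
    print(Renderer(display_users=True).render_info_dict(example_info))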