@@ -1,22 +1,56 @@
 from math import ceil
 
+__all__ = ["Renderer"]
+
+STANDARD_DICT = {
+    "Memory": "Memory",
+    "#Proc": "#Proc",
+    "Temp": "Temp",
+    "Util": "Util",
+}
+
+SHORTENED_DICT = {
+    "Memory": "Mem",
+    "#Proc": "#Pr",
+    "Temp": "T",
+    "Util": "Ut",
+}
+
+
+def build_line(fill_char, length):
+    return "|" + fill_char * (length - 2) + "|"
+
+def fill_line(line, length, fill_char=" "):
+    return line + fill_char * (length - len(line) - 1) + "|"
+
 
 class Renderer:
     def __init__(
         self,
         progress_bar_width=50,
         columns=1,
-        use_space_lines=True,
+        gpu_inner_spacings=True,
+        gpu_outer_spacings=True,
         node_names=None,
         display_power=True,
         display_users=False,
+        dict_type="default",
     ) -> None:
+        assert dict_type in ("default", "shortened")
+
         self.progress_bar_width = progress_bar_width
         self.columns = columns
-        self.use_space_lines = use_space_lines
+        self.gpu_inner_spacings = gpu_inner_spacings
+        self.gpu_outer_spacings = gpu_outer_spacings
         self.node_names = node_names
         self.display_power = display_power
         self.display_users = display_users
+        self.dict_type = dict_type
+
+        if dict_type == "default":
+            self.act_dict = STANDARD_DICT
+        else:
+            self.act_dict = SHORTENED_DICT
 
     def render_info_dict(self, info_dict):
         line_blocks = []
@@ -27,7 +61,7 @@ class Renderer:
 
             line_blocks.append(lines)
 
-        first_line = "|" + "=" * (len(lines[-1]) - 2) + "|"
+        first_line = build_line("=", len(lines[-1]))
 
         final_lines = []
 
@@ -74,35 +108,36 @@ class Renderer:
         temp = node_dict["latest_info"]["temperature"]
 
         head_line = "|- Node: " + name + " "
-        info_line = f"| CPU: {utilization:>4.1f}% Memory: {mem_used:>6}/{mem_total:<6} MB Temp: {temp:>3}°C"
+        info_line = f"| CPU: {utilization:>4.1f}% {self.act_dict['Memory']}: {mem_used:>6}/{mem_total:<6} MB {self.act_dict['Temp']}: {temp:>3}°C"
 
         lines = []
 
         for i, gpu_dict in enumerate(node_dict["gpus"]):
             lines.extend(self.get_rendered_gpu_lines(gpu_dict))
 
-        head_line = head_line + "-" * (len(lines[-1]) - len(head_line) - 1) + "|"
-        info_line = info_line + " " * (len(lines[-1]) - len(info_line) - 1) + "|"
-        pad_line = "|" + "-" * (len(lines[-1]) - 2) + "|"
-        pad_line_empty = "|" + " " * (len(lines[-1]) - 2) + "|"
+        head_line = fill_line(head_line, len(lines[-1]), fill_char="-")
+        info_line = fill_line(info_line, len(lines[-1]))
+        pad_line = build_line("-", len(lines[-1]))
+        pad_line_empty = build_line(" ", len(lines[-1]))
 
-        if self.use_space_lines:
+        if self.gpu_outer_spacings:
             lines.append(pad_line_empty)
-        lines.append("|" + "=" * (len(lines[-1]) - 2) + "|")
+        lines.append(build_line("=", len(lines[-1])))
 
-        lines.insert(0, pad_line_empty)
+        if self.gpu_outer_spacings:
+            lines.insert(0, pad_line_empty)
         lines.insert(0, pad_line)
-        if self.use_space_lines:
+        if self.gpu_outer_spacings:
             lines.insert(0, pad_line_empty)
         lines.insert(0, info_line)
-        if self.use_space_lines:
+        if self.gpu_outer_spacings:
             lines.insert(0, pad_line_empty)
         lines.insert(0, head_line)
 
         return lines
 
     def get_rendered_gpu_lines(self, gpu_dict):
-        gpu_type = gpu_dict["type"]
+        gpu_type = self.maybe_shorten_gpu_name(gpu_dict["type"])
         index = gpu_dict["index"]
         mem_used = gpu_dict["latest_info"]["used_memory_mb"]
         mem_total = gpu_dict["total_memory_mb"]
@@ -117,21 +152,26 @@ class Renderer:
         line_util = "| [" + \
             "=" * mem_used_percent + \
             " " * rest_mem + "]" + \
-            f"{mem_used:>6}/{mem_total:<6} MB, Util: {int(utilization):>3}% |"
+            f"{mem_used:>6}/{mem_total:<6} MB, {self.act_dict['Util']}: {int(utilization):>3}% |"
 
-        line_meta = f"| GPU #{index} ({gpu_type}): #Proc.: {n_processes} Temp: {temp:>3}°C " + (f"Pow: {int(power):>3} W" if self.display_power else "")
+        line_meta = "| " + \
+            f"GPU #{index} ({gpu_type}): " + \
+            f"{self.act_dict['#Proc']}: {n_processes} " + \
+            f"{self.act_dict['Temp']}: {temp:>3}°C " + \
+            (f"Pow: {int(power):>3} W" if self.display_power else "")
 
-        line_meta = line_meta + " " * (len(line_util) - len(line_meta) - 1) + "|"
-        empty_line = "|" + " " * (len(line_meta) - 2) + "|"
+        line_meta = fill_line(line_meta, len(line_util))
+        empty_line = build_line(" ", len(line_meta))
 
         lines = [line_meta, line_util]
 
         if self.display_users:
             lines.append(self.get_rendered_users_line(gpu_dict["running_processes"], len(line_meta)))
         else:
-            lines.append(empty_line)
+            if self.gpu_inner_spacings:
+                lines.append(empty_line)
 
-        if self.use_space_lines:
+        if self.gpu_outer_spacings:
             lines = [empty_line] + lines
 
         return lines
@@ -140,7 +180,7 @@ class Renderer:
         user_names = [p_dict["user_name"] for p_dict in processes_list]
 
         line = "| Users: " + ", ".join(user_names)
-        line += " " * (line_len - len(line) - 1) + "|"
+        line = fill_line(line, line_len)
 
         return line
 
@@ -149,4 +189,13 @@ class Renderer:
 
         block_lines_new = block_lines[:-1] + [buffer_line] * (max_size - len(block_lines)) + block_lines[-1:]
 
-        return block_lines_new
+        return block_lines_new
+
+    def maybe_shorten_gpu_name(self, name):
+        if self.dict_type == "shortened":
+            name = name.replace("GeForce", "").strip()
+
+            if len(name) > 13:
+                name = name[:10] + "..."
+
+        return name
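
Usage sketch for the options introduced above (the import path is an assumption; the constructor arguments and method name come from this diff, and the shape of info_dict is defined elsewhere in the project):

# A minimal sketch, assuming the module is importable as "renderer".
from renderer import Renderer  # hypothetical import path

compact_renderer = Renderer(
    progress_bar_width=30,
    columns=2,
    gpu_inner_spacings=False,   # drop the blank spacer line inside each GPU block
    gpu_outer_spacings=False,   # drop the blank framing lines around node/GPU blocks
    display_power=False,
    dict_type="shortened",      # "Mem"/"#Pr"/"T"/"Ut" labels and shortened GPU names
)

# info_dict is the per-node/per-GPU structure gathered elsewhere in the project:
# output = compact_renderer.render_info_dict(info_dict)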