[Bf-blender-cvs] [37a5ff4a847] master: Tests: support graphing peak memory in Cycles performance tests

Brecht Van Lommel noreply at git.blender.org
Wed Jul 14 16:12:17 CEST 2021


Commit: 37a5ff4a8470a0040f88228bbf3d439c42389446
Author: Brecht Van Lommel
Date:   Wed Jul 14 14:03:04 2021 +0200
Branches: master
https://developer.blender.org/rB37a5ff4a8470a0040f88228bbf3d439c42389446

Tests: support graphing peak memory in Cycles performance tests

The general graphing mechanism creates one graph for each output
variable, so it is not limited to time and memory — although those are
the two outputs the Cycles tests currently produce.

===================================================================

M	tests/performance/api/graph.py
M	tests/performance/api/graph.template.html
M	tests/performance/tests/cycles.py

===================================================================

diff --git a/tests/performance/api/graph.py b/tests/performance/api/graph.py
index eb411915ad9..b3c8329ff27 100644
--- a/tests/performance/api/graph.py
+++ b/tests/performance/api/graph.py
@@ -39,15 +39,24 @@ class TestGraph:
                 else:
                     categories[category] = [entry]
 
-            # Generate one graph for every device x category combination.
+            # Generate one graph for every device x category x result key combination.
             for category, category_entries in categories.items():
                 entries = sorted(category_entries, key=lambda entry: (entry.revision, entry.test))
+
+                outputs = set()
+                for entry in entries:
+                    for output in entry.output.keys():
+                        outputs.add(output)
+
                 chart_type = 'line' if entries[0].benchmark_type == 'time_series' else 'comparison'
-                data.append(self.chart(device_name, category, entries, chart_type))
+
+                for output in outputs:
+                    chart_name = f"{category} ({output})"
+                    data.append(self.chart(device_name, chart_name, entries, chart_type, output))
 
         self.json = json.dumps(data, indent=2)
 
-    def chart(self, device_name: str, category: str, entries: List, chart_type: str) -> Dict:
+    def chart(self, device_name: str, chart_name: str, entries: List, chart_type: str, output: str) -> Dict:
         # Gather used tests.
         tests = {}
         for entry in entries:
@@ -71,7 +80,7 @@ class TestGraph:
         if chart_type == 'line':
             cols.append({'id': '', 'label': 'Date', 'type': 'date'})
         else:
-            cols.append({'id': '', 'label': 'Revision', 'type': 'string'})
+            cols.append({'id': '', 'label': ' ', 'type': 'string'})
         for test, test_index in tests.items():
             cols.append({'id': '', 'label': test, 'type': 'number'})
 
@@ -88,11 +97,11 @@ class TestGraph:
         for entry in entries:
             test_index = tests[entry.test]
             revision_index = revisions[entry.revision]
-            time = entry.output['time']
+            time = entry.output[output] if output in entry.output else -1.0
             rows[revision_index]['c'][test_index + 1] = {'f': None, 'v': time}
 
         data = {'cols': cols, 'rows': rows}
-        return {'device': device_name, 'category': category, 'data': data, 'chart_type': chart_type}
+        return {'device': device_name, 'name': chart_name, 'data': data, 'chart_type': chart_type}
 
     def write(self, filepath: pathlib.Path) -> None:
         # Write HTML page with JSON graph data embedded.
diff --git a/tests/performance/api/graph.template.html b/tests/performance/api/graph.template.html
index 8929c2bdd1b..147f1628c23 100644
--- a/tests/performance/api/graph.template.html
+++ b/tests/performance/api/graph.template.html
@@ -52,7 +52,7 @@
 
         /* Chart drawing options. */
         var options = {
-          chart: {title: device["category"], subtitle: device['device']},
+          chart: {title: device["name"], subtitle: device['device']},
           pointsVisible: true,
           pointSize: 2.5,
           height: 500,
diff --git a/tests/performance/tests/cycles.py b/tests/performance/tests/cycles.py
index f79e7333458..bac6b8a7ceb 100644
--- a/tests/performance/tests/cycles.py
+++ b/tests/performance/tests/cycles.py
@@ -65,16 +65,26 @@ class CyclesTest(api.Test):
         _, lines = env.run_in_blender(_run, args, ['--debug-cycles', '--verbose', '1', self.filepath])
 
         # Parse render time from output
-        prefix = "Render time (without synchronization): "
-        time = 0.0
+        prefix_time = "Render time (without synchronization): "
+        prefix_memory = "Peak: "
+        time = None
+        memory = None
         for line in lines:
             line = line.strip()
-            offset = line.find(prefix)
+            offset = line.find(prefix_time)
             if offset != -1:
-                time = line[offset + len(prefix):]
-                return {'time': float(time)}
+                time = line[offset + len(prefix_time):]
+                time = float(time)
+            offset = line.find(prefix_memory)
+            if offset != -1:
+                memory = line[offset + len(prefix_memory):]
+                memory = memory.split()[0].replace(',', '')
+                memory = float(memory)
+
+        if not (time and memory):
+            raise Exception("Error parsing render time output")
 
-        raise Exception("Error parsing render time output")
+        return {'time': time, 'peak_memory': memory}
 
 
 def generate(env):



More information about the Bf-blender-cvs mailing list