[Bf-blender-cvs] [6fc94d18485] master: Tests: updates for performance benchmarking

Brecht Van Lommel noreply at git.blender.org
Wed Sep 8 16:52:27 CEST 2021


Commit: 6fc94d18485a57d262a3301ad6255b60abcfd883
Author: Brecht Van Lommel
Date:   Wed Sep 8 15:56:50 2021 +0200
Branches: master
https://developer.blender.org/rB6fc94d18485a57d262a3301ad6255b60abcfd883

Tests: updates for performance benchmarking

* Make "run" command (re-)run all tests, add "update" command to only
  run queued and outdated tests equivalent to the old "run" command.
* Support specifying environment variables for revisions, to easily
  compare multiple parameter values (see the config sketch below).
* Better sorting of revisions in the graph.
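
With this change a revision or build entry can carry extra environment
variables. A hypothetical config sketch: the "revisions" and "builds"
dictionaries follow the existing config conventions, the (value,
environment) pair form is inferred from _split_environment_variables in
the diff below, and the variable name and path are made up for
illustration:

    revisions = {
        'master': 'master',
        'master_4_threads': ('master', {'BLENDER_CPU_THREADS': '4'}),
    }
    builds = {
        'custom': ('/path/to/blender', {'BLENDER_CPU_THREADS': '4'}),
    }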

===================================================================

M	tests/performance/api/config.py
M	tests/performance/api/environment.py
M	tests/performance/api/graph.py
M	tests/performance/benchmark

===================================================================

diff --git a/tests/performance/api/config.py b/tests/performance/api/config.py
index d3a79eede14..aa991e7d7d8 100644
--- a/tests/performance/api/config.py
+++ b/tests/performance/api/config.py
@@ -25,6 +25,7 @@ class TestEntry:
     category: str = ''
     revision: str = ''
     git_hash: str = ''
+    environment: Dict = field(default_factory=dict)
     executable: str = ''
     date: int = 0
     device_type: str = 'CPU'
@@ -191,9 +192,10 @@ class TestConfig:
 
         # Get entries for specified commits, tags and branches.
         for revision_name, revision_commit in self.revisions.items():
+            revision_commit, environment = self._split_environment_variables(revision_commit)
             git_hash = env.resolve_git_hash(revision_commit)
             date = env.git_hash_date(git_hash)
-            entries += self._get_entries(revision_name, git_hash, '', date)
+            entries += self._get_entries(revision_name, git_hash, '', environment, date)
 
         # Optimization to avoid rebuilds.
         revisions_to_build = set()
@@ -204,6 +206,7 @@ class TestConfig:
 
         # Get entries for revisions based on existing builds.
         for revision_name, executable in self.builds.items():
+            executable, environment = self._split_environment_variables(executable)
             executable_path = env._blender_executable_from_path(pathlib.Path(executable))
             if not executable_path:
                 sys.stderr.write(f'Error: build {executable} not found\n')
@@ -214,7 +217,7 @@ class TestConfig:
             env.set_default_blender_executable()
 
             mtime = executable_path.stat().st_mtime
-            entries += self._get_entries(revision_name, git_hash, executable, mtime)
+            entries += self._get_entries(revision_name, git_hash, executable, environment, mtime)
 
         # Detect number of categories for more compact printing.
         categories = set()
@@ -229,6 +232,7 @@ class TestConfig:
                      revision_name: str,
                      git_hash: str,
                      executable: pathlib.Path,
+                     environment: str,
                      date: int) -> None:
         entries = []
         for test in self.tests.tests:
@@ -241,10 +245,12 @@ class TestConfig:
                     # Test if revision hash or executable changed.
                     if entry.git_hash != git_hash or \
                        entry.executable != executable or \
+                       entry.environment != environment or \
                        entry.benchmark_type != self.benchmark_type or \
                        entry.date != date:
                         # Update existing entry.
                         entry.git_hash = git_hash
+                        entry.environment = environment
                         entry.executable = executable
                         entry.benchmark_type = self.benchmark_type
                         entry.date = date
@@ -256,6 +262,7 @@ class TestConfig:
                         revision=revision_name,
                         git_hash=git_hash,
                         executable=executable,
+                        environment=environment,
                         date=date,
                         test=test_name,
                         category=test_category,
@@ -266,3 +273,9 @@ class TestConfig:
                 entries.append(entry)
 
         return entries
+
+    def _split_environment_variables(self, revision):
+        if isinstance(revision, str):
+            return revision, {}
+        else:
+            return revision[0], revision[1]
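
The helper at the end of this file accepts either form; a minimal sketch
of its behavior (assuming a TestConfig instance named config):

    config._split_environment_variables('master')
    # -> ('master', {})
    config._split_environment_variables(('master', {'MY_VAR': '1'}))
    # -> ('master', {'MY_VAR': '1'})
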
diff --git a/tests/performance/api/environment.py b/tests/performance/api/environment.py
index 76c731b6118..750d991ebc8 100644
--- a/tests/performance/api/environment.py
+++ b/tests/performance/api/environment.py
@@ -104,9 +104,10 @@ class TestEnvironment:
         self._init_default_blender_executable()
         return True
 
-    def set_blender_executable(self, executable_path: pathlib.Path) -> None:
+    def set_blender_executable(self, executable_path: pathlib.Path, environment: Dict = {}) -> None:
         # Run all Blender commands with this executable.
         self.blender_executable = executable_path
+        self.blender_executable_environment = environment
 
     def _blender_executable_name(self) -> pathlib.Path:
         if platform.system() == "Windows":
@@ -150,6 +151,7 @@ class TestEnvironment:
 
     def set_default_blender_executable(self) -> None:
         self.blender_executable = self.default_blender_executable
+        self.blender_executable_environment = {}
 
     def set_log_file(self, filepath: pathlib.Path, clear=True) -> None:
         # Log all commands and output to this file.
@@ -161,7 +163,7 @@ class TestEnvironment:
     def unset_log_file(self) -> None:
         self.log_file = None
 
-    def call(self, args: List[str], cwd: pathlib.Path, silent=False) -> List[str]:
+    def call(self, args: List[str], cwd: pathlib.Path, silent: bool=False, environment: Dict={}) -> List[str]:
         # Execute command with arguments in specified directory,
         # and return combined stdout and stderr output.
 
@@ -173,7 +175,13 @@ class TestEnvironment:
             f = open(self.log_file, 'a')
             f.write('\n' + ' '.join([str(arg) for arg in args]) + '\n\n')
 
-        proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        env = os.environ
+        if len(environment):
+            env = env.copy()
+            for key, value in environment.items():
+                env[key] = value
+
+        proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
 
         # Read line by line
         lines = []
@@ -208,7 +216,8 @@ class TestEnvironment:
         else:
             common_args += ['--background']
 
-        return self.call([self.blender_executable] + common_args + args, cwd=self.base_dir)
+        return self.call([self.blender_executable] + common_args + args, cwd=self.base_dir,
+                         environment=self.blender_executable_environment)
 
     def run_in_blender(self,
                        function: Callable[[Dict], Dict],
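
Outside the diff, the environment merging in TestEnvironment.call boils
down to the following standalone sketch (the function name is
hypothetical):

    import os
    import subprocess

    def call_with_environment(args, cwd, environment={}):
        # Apply extra variables to a copy of os.environ, so the parent
        # process environment is never modified.
        env = os.environ
        if len(environment):
            env = env.copy()
            for key, value in environment.items():
                env[key] = value
        return subprocess.run(args, cwd=cwd, env=env,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT)
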
diff --git a/tests/performance/api/graph.py b/tests/performance/api/graph.py
index 4ee5ae7cf0e..fe4d4800894 100644
--- a/tests/performance/api/graph.py
+++ b/tests/performance/api/graph.py
@@ -42,7 +42,7 @@ class TestGraph:
 
             # Generate one graph for every device x category x result key combination.
             for category, category_entries in categories.items():
-                entries = sorted(category_entries, key=lambda entry: (entry.revision, entry.test))
+                entries = sorted(category_entries, key=lambda entry: (entry.revision, entry.test, entry.date))
 
                 outputs = set()
                 for entry in entries:
@@ -58,8 +58,6 @@ class TestGraph:
         self.json = json.dumps(data, indent=2)
 
     def chart(self, device_name: str, chart_name: str, entries: List, chart_type: str, output: str) -> Dict:
-        entries = sorted(entries, key=lambda entry: entry.date)
-
         # Gather used tests.
         tests = {}
         for entry in entries:
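
The graph change folds chronological ordering into the initial sort key,
so each (revision, test) series is already ordered by date and the
per-chart sort removed above becomes redundant. A tiny standalone
illustration:

    from types import SimpleNamespace

    entries = [
        SimpleNamespace(revision='b', test='render', date=3),
        SimpleNamespace(revision='a', test='render', date=2),
        SimpleNamespace(revision='a', test='render', date=1),
    ]
    # Same key as the new code in graph.py: group by revision and test,
    # then order each series chronologically.
    entries = sorted(entries, key=lambda e: (e.revision, e.test, e.date))
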
diff --git a/tests/performance/benchmark b/tests/performance/benchmark
index ad1e07d0ef3..343af3be7d1 100755
--- a/tests/performance/benchmark
+++ b/tests/performance/benchmark
@@ -83,15 +83,20 @@ def match_entry(entry: api.TestEntry, args: argparse.Namespace):
            entry.test.find(args.test) != -1 or \
            entry.category.find(args.test) != -1
 
-def run_entry(env: api.TestEnvironment, config: api.TestConfig, row: List, entry: api.TestEntry):
+def run_entry(env: api.TestEnvironment,
+              config: api.TestConfig,
+              row: List,
+              entry: api.TestEntry,
+              update_only: bool):
     # Check if entry needs to be run.
-    if entry.status not in ('queued', 'outdated'):
+    if update_only and entry.status not in ('queued', 'outdated'):
         print_row(config, row, end='\r')
         return False
 
     # Run test entry.
     revision = entry.revision
     git_hash = entry.git_hash
+    environment = entry.environment
     testname = entry.test
     testcategory = entry.category
     device_type = entry.device_type
@@ -116,13 +121,15 @@ def run_entry(env: api.TestEnvironment, config: api.TestConfig, row: List, entry
     print_row(config, row, end='\r')
     executable_ok = True
     if len(entry.executable):
-        env.set_blender_executable(pathlib.Path(entry.executable))
+        env.set_blender_executable(pathlib.Path(entry.executable), environment)
     else:
         env.checkout(git_hash)
         executable_ok = env.build()
         if not executable_ok:
             entry.status = 'failed'
             entry.error_msg = 'Failed to build'
+        else:
+            env.set_blender_executable(env.blender_executable, environment)
 
     # Run test and update output and status.
     if executable_ok:
@@ -219,7 +226,7 @@ def cmd_reset(env: api.TestEnvironment, argv: List):
 
         config.queue.write()
 
-def cmd_run(env: api.TestEnvironment, argv: List):
+def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
     # Run tests.
     parser = argparse.ArgumentParser()
     parser.add_argument('config', nargs='?', default=None)
@@ -233,7 +240,7 @@ def cmd_run(env: api.TestEnvironment, argv: List):
         for row in config.queue.rows(use_revision_columns(config)):
             if match_entry(row[0], args):
                 for entry in row:
-                    if run_entry(env, config, row, entry):
+                    if run_entry(env, config, row, entry, update_only):
                         updated = True
                         # Write queue every time in case running gets interrupted,
                         # so it can be resumed.
@@ -268,8 +275,9 @@ def main():
              '  \n'
              '  list                                 List available tests, devices and configurations\n'
              '  \n'
-             '  run [<config>] [<test>]              Execute tests for configuration\n'
-             '  reset [<config>] [<test>]            Clear tests re

@@ Diff output truncated at 10240 characters. @@
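
Based on the commit message and the (truncated) help text above, the new
command split would be used along these lines ("cycles" is a hypothetical
configuration name):

    ./tests/performance/benchmark run cycles      # (re-)runs all matching tests
    ./tests/performance/benchmark update cycles   # runs only queued and outdated tests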


