[Bf-blender-cvs] SVN commit: /data/svn/repos/bf-blender [61761] trunk/lib/tests/modeling: Made bevel test pass again.

Howard Trickey howard.trickey at gmail.com
Tue Dec 6 13:35:47 CET 2016


Revision: 61761
          https://developer.blender.org/rBL61761
Author:   howardt
Date:     2016-12-06 13:35:45 +0100 (Tue, 06 Dec 2016)
Log Message:
-----------
Made bevel test pass again.

Some internal changes in Blender caused different ordering of some loops
and UV data. Regenerated the golden data for those cases.
Also, added an option to mesh_ops_test.py to replace the golden data with
the current test result.
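
A minimal usage sketch of the new option (assuming the regression .blend
is open in Blender and 'tests' is the list of TestSpec objects defined in
mesh_ops_test.py):

    # Regenerate all golden data after a known, acceptable change in Blender.
    # update_expected only takes effect when cleanup is False (see the
    # RunTest docstring in the diff below).
    RunAllTests(tests, cleanup=False, verbose=True, update_expected=True)

    # After checking the replaced expected objects, re-save the file so the
    # new golden data is kept, e.g. bpy.ops.wm.save_mainfile().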

Modified Paths:
--------------
    trunk/lib/tests/modeling/bevel_regression.blend
    trunk/lib/tests/modeling/mesh_ops_test.py

Modified: trunk/lib/tests/modeling/bevel_regression.blend
===================================================================
(Binary files differ)

Modified: trunk/lib/tests/modeling/mesh_ops_test.py
===================================================================
--- trunk/lib/tests/modeling/mesh_ops_test.py	2016-12-04 01:22:20 UTC (rev 61760)
+++ trunk/lib/tests/modeling/mesh_ops_test.py	2016-12-06 12:35:45 UTC (rev 61761)
@@ -37,8 +37,10 @@
 #
 # The things in angle brackets are parameters of the test, and are specified in
 # a declarative TestSpec.
-# We need to run this in a blend file that has the given <test object> and <expect object>,
-# with identical locations (will probably be convenient to put these on separate layers).
+#
+# If tests fail because of a known and acceptable change in Blender,
+# we can use the 'update_expected' parameter of RunTest
+# to update the <expected object>.
 
 import bpy
 
@@ -172,6 +174,7 @@
         if verbose:
             print('Bad select type', select[0])
         return 'VERT'
+    bpy.context.tool_settings.mesh_select_mode = (seltype == 'VERT', seltype == 'EDGE', seltype == 'FACE')
     parts = select[1:].split()
     try:
         elems = set([int(p) for p in parts])
@@ -192,7 +195,7 @@
     return seltype
 
 
-def RunTest(t, cleanup=True, verbose=False):
+def RunTest(t, cleanup=True, verbose=False, update_expected=False):
     """Run the test specified by given TestSpec.
 
     Args:
@@ -199,6 +202,9 @@
         t: TestSpec
         cleanup: bool - should we clean up duplicate after the test
         verbose: bool - should we be wordy
+        update_expected: bool - should we replace the golden expected object
+                                 with the result of the current run?
+                                 Only has effect if cleanup is false.
     Returns:
         bool - True if test passes, False otherwise
     """
@@ -258,10 +264,23 @@
         bpy.ops.object.delete()
         otest.select = True
         bpy.context.scene.objects.active = otest
+    elif update_expected:
+        if verbose:
+            print('Updating expected object', t.expected_obj)
+        otestdup.location = oexpected.location
+        otestdup.layers = oexpected.layers
+        bpy.context.scene.layers = oexpected.layers
+        bpy.context.scene.objects.active = oexpected
+        bpy.ops.object.select_all(action='DESELECT')
+        oexpected.select = True
+        bpy.ops.object.delete()
+        otestdup.name = t.expected_obj
+        bpy.context.scene.objects.active = otest
+        bpy.context.scene.layers = otest.layers
     return success
 
 
-def RunAllTests(tests, cleanup=True, verbose=False):
+def RunAllTests(tests, cleanup=True, verbose=False, update_expected=False):
     """Run all tests.
 
     Args:
@@ -268,6 +287,8 @@
         tests: list of TestSpec - tests to run
         cleanup: bool - if True, don't leave result objects lying around
         verbose: bool - if True, chatter about running tests and failures
+        update_expected: bool - if True, replace all expected objects with
+                                current results
     Returns:
         bool - True if all tests pass
     """
@@ -276,7 +297,7 @@
     failed = 0
     for t in tests:
         tot += 1
-        if not RunTest(t, cleanup=cleanup, verbose=verbose):
+        if not RunTest(t, cleanup=cleanup, verbose=verbose, update_expected=update_expected):
             failed += 1
     if verbose:
         print('Ran', tot, 'tests,' if tot > 1 else 'test,', failed, 'failed')
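
The same flag can be applied to a single test; a sketch, assuming 't' is
one TestSpec entry (per the docstring above, cleanup must be False for
update_expected to take effect):

    # Replace only this test's golden object with the current result:
    # the old expected object is deleted and the duplicate of the test
    # result is renamed to t.expected_obj.
    RunTest(t, cleanup=False, verbose=True, update_expected=True)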