[Bf-blender-cvs] SVN commit: /data/svn/bf-blender [39485] branches/soc-2011-pepper/release/scripts: Commenting and pep8 compliance
Benjy Cook
benjycook at hotmail.com
Wed Aug 17 12:13:25 CEST 2011
Revision: 39485
http://projects.blender.org/scm/viewvc.php?view=rev&root=bf-blender&revision=39485
Author: benjycook
Date: 2011-08-17 10:13:24 +0000 (Wed, 17 Aug 2011)
Log Message:
-----------
Commenting and pep8 compliance
Modified Paths:
--------------
branches/soc-2011-pepper/release/scripts/modules/mocap_tools.py
branches/soc-2011-pepper/release/scripts/modules/retarget.py
branches/soc-2011-pepper/release/scripts/startup/ui_mocap.py
Modified: branches/soc-2011-pepper/release/scripts/modules/mocap_tools.py
===================================================================
--- branches/soc-2011-pepper/release/scripts/modules/mocap_tools.py 2011-08-17 09:38:50 UTC (rev 39484)
+++ branches/soc-2011-pepper/release/scripts/modules/mocap_tools.py 2011-08-17 10:13:24 UTC (rev 39485)
@@ -24,7 +24,9 @@
from mathutils import Vector, Matrix
-#Vector utility functions
+# A Python implementation of n-dimensional vectors.
+# Mathutils vectors have a maximum size of 4, and we need at least 5 for Simplify Curves and even more for Cross Correlation.
+# Vector utility functions
class NdVector:
vec = []
@@ -90,6 +92,7 @@
y = property(y)
+#Sampled Data Point class for Simplify Curves
class dataPoint:
index = 0
# x,y1,y2,y3 coordinate of original point
@@ -105,11 +108,19 @@
self.u = u
+#Cross Correlation Function
+#http://en.wikipedia.org/wiki/Cross_correlation
+#IN: curvesA, curvesB - bpy_collection/list of fcurves to analyze. Auto-Correlation is when they are the same.
+# margin - When searching for the best "start" frame, how large a neighborhood of frames we should inspect (similar to epsilon in calculus)
+#OUT: startFrame, length of new anim, and curvesA
def crossCorrelationMatch(curvesA, curvesB, margin):
dataA = []
dataB = []
- end = len(curvesA[0].keyframe_points)
+ start, end = curvesA[0].range()
+ start = int(start)
+ end = int(end)
+ #transfer each frame's data from all fcurves into a single NdVector.
for i in range(1, end):
vec = []
for fcurve in curvesA:
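For readers skimming the diff, a minimal standalone sketch of this per-frame packing (plain Python, no bpy; the channels data and pack_frames helper below are hypothetical stand-ins for sampling the fcurves):

    # One vector per frame, one component per fcurve channel, mirroring
    # the NdVector packing in the loops above.
    channels = [
        [0.0, 0.5, 1.0, 0.5, 0.0],  # hypothetical X-location samples
        [1.0, 0.8, 0.6, 0.8, 1.0],  # hypothetical Y-location samples
    ]

    def pack_frames(channels):
        return [tuple(ch[i] for ch in channels) for i in range(len(channels[0]))]

    print(pack_frames(channels))
    # [(0.0, 1.0), (0.5, 0.8), (1.0, 0.6), (0.5, 0.8), (0.0, 1.0)]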
@@ -120,9 +131,11 @@
vec.append(fcurve.evaluate(i))
dataB.append(NdVector(vec))
+ #Comparator for Cross Correlation. The "classic" implementation uses the dot product, as do we.
def comp(a, b):
return a * b
+ #Create Rxy, which holds the Cross Correlation data.
N = len(dataA)
Rxy = [0.0] * N
for i in range(N):
@@ -131,7 +144,9 @@
for j in range(i):
Rxy[i] += comp(dataA[j], dataB[j - i + N])
Rxy[i] /= float(N)
- def bestLocalMaximum(Rxy):
+
+ #Find the local maxima in the Cross Correlation data via the numerical derivative.
+ def LocalMaximums(Rxy):
Rxyd = [Rxy[i] - Rxy[i - 1] for i in range(1, len(Rxy))]
maxs = []
for i in range(1, len(Rxyd) - 1):
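The hunk elides the first inner loop of the Rxy computation; assuming it accumulates the non-wrapping terms comp(dataA[j], dataB[j - i]) for j in range(i, N), the whole block is a normalized circular cross-correlation. A standalone sketch on plain floats:

    def cross_correlation(dataA, dataB):
        N = len(dataA)
        Rxy = [0.0] * N
        for i in range(N):
            for j in range(i, N):  # non-wrapping terms (assumed elided loop)
                Rxy[i] += dataA[j] * dataB[j - i]
            for j in range(i):  # wrap-around terms, as in the code above
                Rxy[i] += dataA[j] * dataB[j - i + N]
            Rxy[i] /= float(N)
        return Rxy

    print(cross_correlation([0.0, 1.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0]))
    # [0.5, 0.0, 0.5, 0.0] - peaks at shifts 0 and 2 for this period-2 signal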
@@ -142,9 +157,12 @@
maxs.append((i, max(Rxy[i], Rxy[i - 1])))
return [x[0] for x in maxs]
#~ return max(maxs, key=lambda x: x[1])[0]
-
- flms = bestLocalMaximum(Rxy[0:int(len(Rxy))])
+
+ #flms - the possible offsets of the first part of the animation. In Auto-Corr, this is the length of the loop.
+ flms = LocalMaximums(Rxy[0:int(len(Rxy))])
ss = []
+
+ #for every local maximum, find the best one - i.e. the one that also has the best start frame.
for flm in flms:
diff = []
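LocalMaximums above finds peaks through a sign change in the first difference; the exact comparison is elided by the hunk, so this standalone sketch assumes a simple positive-to-negative crossing:

    def local_maximums(Rxy):
        Rxyd = [Rxy[i] - Rxy[i - 1] for i in range(1, len(Rxy))]
        maxs = []
        for i in range(1, len(Rxyd) - 1):
            # assumed test: the derivative goes from rising to falling at i
            if Rxyd[i - 1] > 0 and Rxyd[i] < 0:
                maxs.append((i, max(Rxy[i], Rxy[i - 1])))
        return [x[0] for x in maxs]

    print(local_maximums([0.0, 0.5, 1.0, 0.4, 0.2, 0.8, 0.3, 0.1]))  # [2, 5]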
@@ -159,20 +177,28 @@
if errorSlice < bestSlice[1]:
bestSlice = (i, errorSlice, flm)
return bestSlice
-
+
s = lowerErrorSlice(diff, margin)
ss.append(s)
- ss.sort(key = lambda x: x[1])
+ #Find the best result and return it.
+ ss.sort(key=lambda x: x[1])
return ss[0][2], ss[0][0], dataA
+
+#Uses auto correlation (cross correlation of the same set of curves) and trims the active_object's fcurves,
+#except for location curves (which in mocap tend not to be cyclic, e.g. a walk cycle moving forward).
+#Transfers the fcurve data to a list of NdVectors (one per frame, each with a component per fcurve), and calls the cross correlation function.
+#Then trims the fcurves accordingly.
+#IN: Nothing; set the object you want as active and call. Assumes the object has animation_data.action!
+#OUT: Trims the object's fcurves (except location curves).
def autoloop_anim():
context = bpy.context
obj = context.active_object
-
+
def locCurve(x):
return x.data_path == "location"
-
+
fcurves = [x for x in obj.animation_data.action.fcurves if not locCurve(x)]
margin = 10
@@ -180,13 +206,10 @@
flm, s, data = crossCorrelationMatch(fcurves, fcurves, margin)
loop = data[s:s + flm]
- #find *all* loops, s:s+flm, s+flm:s+2flm, etc...
- #and interpolate between all
- # to find "the perfect loop".
- #Maybe before finding s? interp(i,i+flm,i+2flm)....
- #~ for i in range(1, margin + 1):
- #~ w1 = sqrt(float(i) / margin)
- #~ loop[-i] = (loop[-i] * w1) + (loop[0] * (1 - w1))
+ #perform blending with a square-root falloff on the seam's neighborhood to ensure good tiling.
+ for i in range(1, margin + 1):
+ w1 = sqrt(float(i) / margin)
+ loop[-i] = (loop[-i] * w1) + (loop[0] * (1 - w1))
for curve in fcurves:
pts = curve.keyframe_points
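The square-root falloff above blends each of the last margin frames toward the first frame, most strongly right at the seam, so the trimmed clip tiles without a pop. A standalone sketch on scalar samples (NdVector supports the same arithmetic):

    from math import sqrt

    def blend_seam(loop, margin):
        for i in range(1, margin + 1):
            w1 = sqrt(float(i) / margin)  # smallest at the seam, 1.0 margin frames in
            loop[-i] = (loop[-i] * w1) + (loop[0] * (1 - w1))
        return loop

    print(blend_seam([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], margin=3))
    # the last samples are pulled toward the first, e.g. 5.0 -> ~2.89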
@@ -201,8 +224,16 @@
context.scene.frame_end = flm
+#simplifyCurves: performs the bulk of the samples-to-bezier conversion.
+#IN: curveGroup - which can be a collection of single fcurves, or grouped (via nested lists).
+# error - threshold of permissible error (max distance) of the new beziers to the original data
+# reparaError - threshold of error below which we try to fix the parameterization rather than split the existing curve. Should be > error, usually by a small constant factor, for best performance.
+# maxIterations - maximum number of iterations of reparameterization we should attempt. (Newton-Raphson is not guaranteed to converge, so this is needed.)
+# group_mode - boolean, indicating whether we should place bezier keyframes on the same x (frame), or optimize each individual curve.
+#OUT: None. Deletes the existing curves and creates the new beziers.
def simplifyCurves(curveGroup, error, reparaError, maxIterations, group_mode):
+ #Calculates the unit tangent at point v
def unitTangent(v, data_pts):
tang = NdVector((0, 0, 0, 0, 0))
if v != 0:
@@ -214,7 +245,8 @@
tang.normalize()
return tang
- #assign parametric u value for each point in original data
+ #assign parametric u value for each point in original data, via relative arc length
+ #http://en.wikipedia.org/wiki/Arc_length
def chordLength(data_pts, s, e):
totalLength = 0
for pt in data_pts[s:e + 1]:
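chordLength implements standard chord-length parameterization: each point's u is its cumulative polyline distance from the first point, normalized to [0, 1]. A standalone sketch:

    def chord_length_params(points):
        lengths = [0.0]  # cumulative arc length up to each point
        for a, b in zip(points, points[1:]):
            seg = sum((bi - ai) ** 2 for ai, bi in zip(a, b)) ** 0.5
            lengths.append(lengths[-1] + seg)
        total = lengths[-1]
        return [l / total for l in lengths]

    print(chord_length_params([(0.0, 0.0), (1.0, 0.0), (1.0, 2.0), (1.0, 3.0)]))
    # [0.0, 0.25, 0.75, 1.0]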
@@ -230,7 +262,7 @@
print(s, e)
pt.u = (pt.temp / totalLength)
- # get binomial coefficient, this function/table is only called with args
+ # get binomial coefficient lookup table, this function/table is only called with args
# (3,0),(3,1),(3,2),(3,3),(2,0),(2,1),(2,2)!
binomDict = {(3, 0): 1,
(3, 1): 3,
@@ -239,8 +271,8 @@
(2, 0): 1,
(2, 1): 2,
(2, 2): 1}
+
#value at parameter t of a single Bernstein polynomial
-
def bernsteinPoly(n, i, t):
binomCoeff = binomDict[(n, i)]
return binomCoeff * pow(t, i) * pow(1 - t, n - i)
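With the same hard-coded binomial table, a cubic (or quadratic) Bézier can be evaluated directly from the Bernstein basis; here is a standalone sketch using scalar control values for brevity:

    binomDict = {(3, 0): 1, (3, 1): 3, (3, 2): 3, (3, 3): 1,
                 (2, 0): 1, (2, 1): 2, (2, 2): 1}

    def bernstein_poly(n, i, t):
        return binomDict[(n, i)] * pow(t, i) * pow(1 - t, n - i)

    def bezier_point(controls, t):
        n = len(controls) - 1  # 3 for a cubic
        return sum(c * bernstein_poly(n, i, t) for i, c in enumerate(controls))

    print(bezier_point([0.0, 1.0, 2.0, 3.0], 0.5))  # 1.5, halfway along this curve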
@@ -380,6 +412,7 @@
fud = 1
pt.u = pt.u - (fu / fud)
+ #Create data_pts, a list of dataPoint type, each assigned an index i and an NdVector
def createDataPts(curveGroup, group_mode):
data_pts = []
if group_mode:
@@ -403,6 +436,7 @@
data_pts.append(dataPoint(i, NdVector((x, y1, y2, y3, y4))))
return data_pts
+ #Recursively fit cubic beziers to the data_pts between s and e
def fitCubic(data_pts, s, e):
# if there are less than 3 points, fit a single basic bezier
if e - s < 3:
@@ -437,6 +471,7 @@
beziers.append(bez)
return
+ # deletes the sampled points and creates beziers.
def createNewCurves(curveGroup, beziers, group_mode):
#remove all existing data points
if group_mode:
@@ -483,15 +518,14 @@
#remove old Fcurves and insert the new ones
createNewCurves(curveGroup, beziers, group_mode)
-#Main function of simplification
-#sel_opt: either "sel" or "all" for which curves to effect
-#error: maximum error allowed, in fraction (20% = 0.0020),
-#i.e. divide by 10000 from percentage wanted.
-#group_mode: boolean, to analyze each curve seperately or in groups,
-#where group is all curves that effect the same property
-#(e.g. a bone's x,y,z rotation)
-
+#Main function of simplification, which is called by the Operator
+#IN:
+# sel_opt- either "sel" (selected) or "all" for which curves to affect
+# error- maximum error allowed, in fraction (20% = 0.0020, which is the default),
+# i.e. divide by 10000 from percentage wanted.
+# group_mode- boolean, to analyze each curve separately or in groups,
+# where a group is all curves that affect the same property/RNA path
def fcurves_simplify(context, obj, sel_opt="all", error=0.002, group_mode=True):
# main vars
fcurves = obj.animation_data.action.fcurves
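A hedged usage sketch for fcurves_simplify (runs only inside Blender; assumes the mocap_tools module is importable and the object has animation_data.action):

    import bpy
    import mocap_tools

    # Simplify all fcurves of the active object, grouped per RNA path,
    # with the default error threshold described above.
    obj = bpy.context.active_object
    mocap_tools.fcurves_simplify(bpy.context, obj, sel_opt="all",
                                 error=0.002, group_mode=True)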
@@ -533,11 +567,12 @@
return
+
# Implementation of a non-linear median filter, with variable kernel size
-# Double pass - one marks spikes, the other smooths one
+# Double pass - one marks spikes, the other smooths them
# Expects sampled keyframes on every frame
-
-
+# IN: None. Performs the operations on the active_object's fcurves. Expects animation_data.action to exist!
+# OUT: None. Fixes the fcurves "in-place".
def denoise_median():
context = bpy.context
obj = context.active_object
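The filter body is elided by the diff, but the description suggests this shape: pass one marks samples deviating too far from their windowed median, pass two replaces only the marked samples. A hypothetical standalone sketch (the kernel size and threshold below are illustrative, not the module's actual values):

    def median_despike(samples, kernel=3, threshold=1.0):
        half = kernel // 2

        def local_median(i):
            lo, hi = max(0, i - half), min(len(samples), i + half + 1)
            return sorted(samples[lo:hi])[(hi - lo) // 2]

        # pass 1: mark spikes against the original data
        spikes = [i for i in range(len(samples))
                  if abs(samples[i] - local_median(i)) > threshold]
        # pass 2: smooth only the marked samples
        for i in spikes:
            samples[i] = local_median(i)
        return samples

    print(median_despike([0.0, 0.1, 9.0, 0.2, 0.3]))  # [0.0, 0.1, 0.2, 0.2, 0.3]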
@@ -568,6 +603,9 @@
return
+# Receives an armature, and rotates all bones by 90 degrees about the X axis
+# This fixes the axis issue common to BVH files on import.
+# IN: Armature (bpy.types.Armature)
def rotate_fix_armature(arm_data):
global_matrix = Matrix.Rotation(radians(90), 4, "X")
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
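The matrix built above maps Y-up (common in BVH exports) onto Blender's Z-up convention. A small check, using the 2.5x-era API where matrix-vector multiplication is written with *:

    from math import radians
    from mathutils import Matrix, Vector

    global_matrix = Matrix.Rotation(radians(90), 4, "X")
    print(global_matrix * Vector((0.0, 1.0, 0.0)))
    # approximately (0.0, 0.0, 1.0): the Y axis is rotated onto Z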
@@ -588,6 +626,8 @@
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
+#Roughly scales the performer armature to match the enduser armature
+#IN: performer_obj, enduser_obj - Blender objects whose .data is an armature.
def scale_fix_armature(performer_obj, enduser_obj):
perf_bones = performer_obj.data.bones
end_bones = enduser_obj.data.bones
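The actual scaling heuristic is elided by the diff; purely as a hypothetical illustration, one rough factor could come from the ratio of overall skeleton heights (all names and numbers below are made up):

    def skeleton_height(head_heights):
        return max(head_heights) - min(head_heights)

    performer_heights = [0.0, 40.0, 80.0, 170.0]  # hypothetical, cm-scaled rig
    enduser_heights = [0.0, 0.4, 0.8, 1.7]        # hypothetical, meter-scaled rig

    factor = skeleton_height(enduser_heights) / skeleton_height(performer_heights)
    print(factor)  # 0.01 - scale the performer down 100x to match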
@@ -611,6 +651,8 @@
performer_obj.scale *= factor
+#Guess Mapping
+#Given a performer and enduser armature, attempts to guess the hierarchy mapping
def guessMapping(performer_obj, enduser_obj):
perf_bones = performer_obj.data.bones
end_bones = enduser_obj.data.bones
@@ -642,11 +684,16 @@
@@ Diff output truncated at 10240 characters. @@