[Bf-blender-cvs] [60f164b] cycles_bvh: Cycles: Switch node address to absolute values in BVH tree

Sergey Sharybin noreply at git.blender.org
Tue Jun 14 14:33:30 CEST 2016


Commit: 60f164b6aa325c31572940d56dc2b0b04f62fa0f
Author: Sergey Sharybin
Date:   Tue Jun 14 14:29:09 2016 +0200
Branches: cycles_bvh
https://developer.blender.org/rB60f164b6aa325c31572940d56dc2b0b04f62fa0f

Cycles: Switch node address to absolute values in BVH tree

This seems to be a straightforward way to support heterogeneous nodes
in the same tree.

There is some penalty related to the 4 GiB limit of the address space
now, but consider the following:

- Traversal code was already using ints to store the final offset, so
  there can't really be any regressions.

- We know that node sizes are 16-byte aligned, so we can still use
  SHR/SHL tricks to extend the addressable range of nodes (see the
  sketch after this list).
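
A minimal sketch of that SHR/SHL idea (not the actual Cycles code; the
helper names are made up for illustration). Since every node starts at
a 16-byte (float4) boundary, a byte offset can be stored shifted right
by 4 bits and restored with a left shift, so a 32-bit int can address
64 GiB of node data instead of 4 GiB:

	#include <assert.h>
	#include <stdint.h>

	/* Hypothetical helpers; names are illustrative only. */
	static inline int pack_node_addr(uint64_t byte_offset)
	{
		assert((byte_offset & 0xF) == 0);  /* must be 16-byte aligned */
		return (int)(byte_offset >> 4);    /* SHR: bytes -> float4 units */
	}

	static inline uint64_t unpack_node_addr(int node_addr)
	{
		return (uint64_t)node_addr << 4;   /* SHL: float4 units -> bytes */
	}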

This commit is required to make it possible to encode both aligned
and unaligned nodes in the same array. In the future we can also use
this to get rid of the __leaf_nodes array (which is a bit tricky to do
because of the trickery in pack_instances()).
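
For illustration, a rough sketch of how absolute addressing lets nodes
of different sizes live in one float4 array (types and sizes are
simplified placeholders, not the exact Cycles layout): the packer just
appends each node, whatever its size, and records the index at which it
starts; traversal then reads from that index directly.

	#include <vector>

	struct float4 { float x, y, z, w; };

	/* Illustrative sizes in float4 units (aligned vs. unaligned node). */
	enum { NODE_SIZE = 4, UNALIGNED_NODE_SIZE = 9 };

	static int append_node(std::vector<float4> &nodes,
	                       const float4 *data, int size)
	{
		int addr = (int)nodes.size();      /* absolute float4 index */
		nodes.insert(nodes.end(), data, data + size);
		return addr;                       /* parent stores this address */
	}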

===================================================================

M	intern/cycles/bvh/bvh.cpp
M	intern/cycles/kernel/geom/geom.h
M	intern/cycles/kernel/geom/geom_bvh_curve.h
M	intern/cycles/kernel/geom/geom_bvh_shadow.h
M	intern/cycles/kernel/geom/geom_bvh_subsurface.h
M	intern/cycles/kernel/geom/geom_bvh_traversal.h
M	intern/cycles/kernel/geom/geom_bvh_volume.h
M	intern/cycles/kernel/geom/geom_bvh_volume_all.h
M	intern/cycles/kernel/geom/geom_qbvh.h
M	intern/cycles/kernel/geom/geom_qbvh_curve.h
M	intern/cycles/kernel/geom/geom_qbvh_shadow.h
M	intern/cycles/kernel/geom/geom_qbvh_subsurface.h
M	intern/cycles/kernel/geom/geom_qbvh_traversal.h
M	intern/cycles/kernel/geom/geom_qbvh_volume.h
M	intern/cycles/kernel/geom/geom_qbvh_volume_all.h

===================================================================

diff --git a/intern/cycles/bvh/bvh.cpp b/intern/cycles/bvh/bvh.cpp
index 93b7bf8..13f453f 100644
--- a/intern/cycles/bvh/bvh.cpp
+++ b/intern/cycles/bvh/bvh.cpp
@@ -297,8 +297,8 @@ void BVH::pack_instances(size_t nodes_size, size_t leaf_nodes_size)
 
 		BVH *bvh = mesh->bvh;
 
-		int noffset = nodes_offset/nsize;
-		int noffset_leaf = nodes_leaf_offset/nsize_leaf;
+		int noffset = nodes_offset;
+		int noffset_leaf = nodes_leaf_offset;
 		int mesh_tri_offset = mesh->tri_offset;
 		int mesh_curve_offset = mesh->curve_offset;
 
@@ -437,7 +437,7 @@ void RegularBVH::pack_leaf(const BVHStackEntry& e,
 		data[0].w = __uint_as_float(pack.prim_type[leaf->m_lo]);
 	}
 
-	memcpy(&pack.leaf_nodes[e.idx * BVH_NODE_LEAF_SIZE], data, sizeof(float4)*BVH_NODE_LEAF_SIZE);
+	memcpy(&pack.leaf_nodes[e.idx], data, sizeof(float4)*BVH_NODE_LEAF_SIZE);
 }
 
 void RegularBVH::pack_inner(const BVHStackEntry& e,
@@ -487,9 +487,7 @@ void RegularBVH::pack_unaligned_leaf(const BVHStackEntry& e,
 		data[0].w = __uint_as_float(pack.prim_type[leaf->m_lo]);
 	}
 
-	memcpy(&pack.leaf_nodes[e.idx * BVH_NODE_LEAF_SIZE],
-	       data,
-	       sizeof(int4)*BVH_NODE_LEAF_SIZE);
+	memcpy(&pack.leaf_nodes[e.idx], data, sizeof(int4)*BVH_NODE_LEAF_SIZE);
 }
 
 void RegularBVH::pack_unaligned_inner(const BVHStackEntry& e,
@@ -774,7 +772,7 @@ void QBVH::pack_leaf(const BVHStackEntry& e, const LeafNode *leaf)
 		data[0].w = __uint_as_float(pack.prim_type[leaf->m_lo]);
 	}
 
-	memcpy(&pack.leaf_nodes[e.idx * BVH_QNODE_LEAF_SIZE], data, sizeof(float4)*BVH_QNODE_LEAF_SIZE);
+	memcpy(&pack.leaf_nodes[e.idx], data, sizeof(float4)*BVH_QNODE_LEAF_SIZE);
 }
 
 void QBVH::pack_inner(const BVHStackEntry& e, const BVHStackEntry *en, int num)
@@ -812,7 +810,7 @@ void QBVH::pack_inner(const BVHStackEntry& e, const BVHStackEntry *en, int num)
 		data[7][i] = __int_as_float(0);
 	}
 
-	memcpy(&pack.nodes[e.idx * BVH_QNODE_SIZE], data, sizeof(float4)*BVH_QNODE_SIZE);
+	memcpy(&pack.nodes[e.idx], data, sizeof(float4)*BVH_QNODE_SIZE);
 }
 
 void QBVH::pack_unaligned_leaf(const BVHStackEntry& e, const LeafNode *leaf)
@@ -833,9 +831,7 @@ void QBVH::pack_unaligned_leaf(const BVHStackEntry& e, const LeafNode *leaf)
 	if(leaf->num_triangles() != 0) {
 		data[0].w = __uint_as_float(pack.prim_type[leaf->m_lo]);
 	}
-	memcpy(&pack.leaf_nodes[e.idx * BVH_QNODE_LEAF_SIZE],
-	       data,
-	       sizeof(float4)*BVH_QNODE_LEAF_SIZE);
+	memcpy(&pack.leaf_nodes[e.idx], data, sizeof(float4)*BVH_QNODE_LEAF_SIZE);
 }
 
 void QBVH::pack_unaligned_inner(const BVHStackEntry& e,
@@ -926,9 +922,7 @@ void QBVH::pack_unaligned_inner(const BVHStackEntry& e,
 		}
 	}
 
-	memcpy(&pack.nodes[e.idx * BVH_UNALIGNED_QNODE_SIZE],
-	       data,
-	       sizeof(float4)*BVH_UNALIGNED_QNODE_SIZE);
+	memcpy(&pack.nodes[e.idx], data, sizeof(float4)*BVH_UNALIGNED_QNODE_SIZE);
 }
 
 /* Quad SIMD Nodes */
@@ -962,7 +956,8 @@ void QBVH::pack_nodes(const BVHNode *root)
 		stack.push_back(BVHStackEntry(root, nextLeafNodeIdx++));
 	}
 	else {
-		stack.push_back(BVHStackEntry(root, nextNodeIdx++));
+		stack.push_back(BVHStackEntry(root, nextNodeIdx));
+		nextNodeIdx += nsize;
 	}
 
 	while(stack.size()) {
@@ -1012,7 +1007,8 @@ void QBVH::pack_nodes(const BVHNode *root)
 					idx = nextLeafNodeIdx++;
 				}
 				else {
-					idx = nextNodeIdx++;
+					idx = nextNodeIdx;
+					nextNodeIdx += nsize;
 				}
 				stack.push_back(BVHStackEntry(nodes[i], idx));
 			}
diff --git a/intern/cycles/kernel/geom/geom.h b/intern/cycles/kernel/geom/geom.h
index f04524e..3ba62bb 100644
--- a/intern/cycles/kernel/geom/geom.h
+++ b/intern/cycles/kernel/geom/geom.h
@@ -21,15 +21,8 @@
 /* 64 object BVH + 64 mesh BVH + 64 object node splitting */
 #define BVH_STACK_SIZE 192
 #define BVH_QSTACK_SIZE 384
-#define BVH_NODE_SIZE 4
-#define BVH_NODE_LEAF_SIZE 1
-#define BVH_QNODE_SIZE 8
-#define BVH_QNODE_LEAF_SIZE 1
 #define TRI_NODE_SIZE 3
 
-#define BVH_UNALIGNED_NODE_SIZE 9
-#define BVH_UNALIGNED_QNODE_SIZE 14
-
 /* silly workaround for float extended precision that happens when compiling
  * without sse support on x86, it results in different results for float ops
  * that you would otherwise expect to compare correctly */
diff --git a/intern/cycles/kernel/geom/geom_bvh_curve.h b/intern/cycles/kernel/geom/geom_bvh_curve.h
index 7e7846e..de3cca6 100644
--- a/intern/cycles/kernel/geom/geom_bvh_curve.h
+++ b/intern/cycles/kernel/geom/geom_bvh_curve.h
@@ -20,16 +20,16 @@ ccl_device_inline Transform bvh_curve_fetch_aligned_space(KernelGlobals *kg,
 {
 	Transform aligned_space;
 	if(child == 0) {
-		aligned_space.x = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+0);
-		aligned_space.y = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+1);
-		aligned_space.z = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+2);
-		aligned_space.w = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+3);
+		aligned_space.x = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+0);
+		aligned_space.y = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+1);
+		aligned_space.z = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+2);
+		aligned_space.w = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+3);
 	}
 	else {
-		aligned_space.x = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+4);
-		aligned_space.y = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+5);
-		aligned_space.z = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+6);
-		aligned_space.w = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+7);
+		aligned_space.x = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+4);
+		aligned_space.y = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+5);
+		aligned_space.z = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+6);
+		aligned_space.w = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+7);
 	}
 	return aligned_space;
 }
@@ -84,10 +84,10 @@ ccl_device_inline int bvh_curve_intersect_aligned(KernelGlobals *kg,
 {
 
 	/* fetch node data */
-	float4 node0 = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+0);
-	float4 node1 = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+1);
-	float4 node2 = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+2);
-	float4 cnodes = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+8);
+	float4 node0 = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+0);
+	float4 node1 = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+1);
+	float4 node2 = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+2);
+	float4 cnodes = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+8);
 
 	/* intersect ray against child nodes */
 	NO_EXTENDED_PRECISION float c0lox = (node0.x - P.x) * idir.x;
@@ -145,9 +145,9 @@ int ccl_device bvh_curve_intersect_node(KernelGlobals *kg,
                                         float dist[2])
 {
 	int mask = 0;
-	float4 node = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+7);
+	float4 node = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+7);
 	if(node.w != 0.0f) {
-		float4 cnodes = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+8);
+		float4 cnodes = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+8);
 		if(bvh_curve_intersect_unaligned_child(kg, P, dir, t, difl, nodeAddr, 0, &dist[0])) {
 			if((__float_as_uint(cnodes.z) & visibility)) {
 				mask |= 1;
@@ -234,7 +234,7 @@ int ccl_device bvh_curve_intersect_node_unaligned(KernelGlobals *kg,
 
 #  ifdef __VISIBILITY_FLAG__
 	/* this visibility test gives a 5% performance hit, how to solve? */
-	float4 cnodes = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+8);
+	float4 cnodes = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+8);
 	int cmask = (((mask & 1) && (__float_as_uint(cnodes.z) & visibility))? 1: 0) |
 	            (((mask & 2) && (__float_as_uint(cnodes.w) & visibility))? 2: 0);
 	return cmask;
@@ -258,7 +258,7 @@ int ccl_device_inline bvh_curve_intersect_node_aligned(KernelGlobals *kg,
 	const ssef pn = cast(ssei(0, 0, 0x80000000, 0x80000000));
 
 	/* fetch node data */
-	const ssef *bvh_nodes = (ssef*)kg->__bvh_curve_nodes.data + nodeAddr*BVH_UNALIGNED_NODE_SIZE;
+	const ssef *bvh_nodes = (ssef*)kg->__bvh_curve_nodes.data + nodeAddr;
 
 	/* intersect ray against child nodes */
 	const ssef tminmaxx = (shuffle_swap(bvh_nodes[0], shufflexyz[0]) - Psplat[0]) * idirsplat[0];
@@ -277,7 +277,7 @@ int ccl_device_inline bvh_curve_intersect_node_aligned(KernelGlobals *kg,
 
 #  ifdef __VISIBILITY_FLAG__
 	/* this visibility test gives a 5% performance hit, how to solve? */
-	float4 cnodes = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+8);
+	float4 cnodes = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+8);
 	int cmask = (((mask & 1) && (__float_as_uint(cnodes.z) & visibility))? 1: 0) |
 	            (((mask & 2) && (__float_as_uint(cnodes.w) & visibility))? 2: 0);
 	return cmask;
@@ -300,7 +300,7 @@ int ccl_device_inline bvh_curve_intersect_node(KernelGlobals *kg,
                                                int nodeAddr,
                                                float dist[2])
 {
-	float4 node = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr*BVH_UNALIGNED_NODE_SIZE+7);
+	float4 node = kernel_tex_fetch(__bvh_curve_nodes, nodeAddr+7);
 	if(node.w != 0.0f) {
 		return bvh_curve_intersect_node_unaligned(kg,
 		                                          P,
diff --git a/intern/cycles/kernel/geom/geom_bvh_shadow.h b/intern/cycles/kernel/geom/geom_bvh_shadow.h
index 4005489..2e000c2 100644
--- a/intern/cycles/kernel/geom/geom_bvh_shadow.h
+++ b/intern/cycles/kernel/geom/geom_bvh_shadow.h
@@ -102,10 +102,10 @@ ccl_device bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals *kg,
 				float t = isect_t;
 
 				/* fetch node data */
-				float4 node0 = kernel_tex_fetch(__bvh_nodes, nodeAddr*BVH_NODE_SIZE+0);
-				float4 node1 = kernel_tex_fetch(__bvh_nodes, nodeAddr*BVH_NODE_SIZE+1);
-				float4 node2 = kernel_tex_fetch(__bvh_nodes, nodeAddr*BVH_NODE_SIZE+2);
-				float4 cnodes = kernel_tex_fetch(__bvh_nodes, nodeAddr*BVH_NODE_SIZE+3);
+				float4 node0 = kernel_tex_fetch(__bvh_nodes, nodeAddr+0);
+				float4 node1 = kernel_tex_fetch(__bvh_nodes, nodeAddr

@@ Diff output truncated at 10240 characters. @@




More information about the Bf-blender-cvs mailing list