[Bf-blender-cvs] [9d0ac94] master: Fix T49750: Cycles wrong ray differentials for perspective and stereo cameras.

Brecht Van Lommel noreply at git.blender.org
Sat Oct 22 16:38:21 CEST 2016


Commit: 9d0ac94d52268ff34ce645035d4315d6018d02b9
Author: Brecht Van Lommel
Date:   Sat Oct 22 15:59:23 2016 +0200
Branches: master
https://developer.blender.org/rB9d0ac94d52268ff34ce645035d4315d6018d02b9

Fix T49750: Cycles wrong ray differentials for perspective and stereo cameras.
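
For the stereo and panorama cameras the differentials are now computed by
finite differences: the camera ray is generated for the centre pixel and for
the pixels one raster unit away in x and y (ignoring depth of field), and the
differences of the resulting origins and directions become dP and dD. A
minimal standalone sketch of that idea, using a hypothetical camera_ray
callback in place of the actual Cycles camera sampling code:

    struct float3 { float x, y, z; };

    static float3 operator-(const float3 &a, const float3 &b)
    {
        return {a.x - b.x, a.y - b.y, a.z - b.z};
    }

    struct RayDifferentials {
        float3 dPdx, dPdy;  /* change of the ray origin per raster pixel */
        float3 dDdx, dDdy;  /* change of the ray direction per raster pixel */
    };

    /* Finite-difference ray differentials: generate the camera ray for the
     * centre pixel and for its raster_x+1 / raster_y+1 neighbours (with depth
     * of field disabled so all three rays share one lens sample), then take
     * the differences of origins and directions. */
    static RayDifferentials ray_differentials_from_raster(
            float raster_x, float raster_y,
            void (*camera_ray)(float x, float y, float3 *P, float3 *D))
    {
        float3 Pc, Dc, Px, Dx, Py, Dy;
        camera_ray(raster_x,        raster_y,        &Pc, &Dc);
        camera_ray(raster_x + 1.0f, raster_y,        &Px, &Dx);
        camera_ray(raster_x,        raster_y + 1.0f, &Py, &Dy);

        RayDifferentials d;
        d.dPdx = Px - Pc;
        d.dDdx = Dx - Dc;
        d.dPdy = Py - Pc;
        d.dDdy = Dy - Dc;
        return d;
    }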

===================================================================

M	intern/cycles/kernel/kernel_camera.h
M	intern/cycles/kernel/kernel_projection.h

===================================================================

diff --git a/intern/cycles/kernel/kernel_camera.h b/intern/cycles/kernel/kernel_camera.h
index ed9726e..de3d70b 100644
--- a/intern/cycles/kernel/kernel_camera.h
+++ b/intern/cycles/kernel/kernel_camera.h
@@ -105,39 +105,61 @@ ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, flo
 	}
 #endif
 
-	float3 tP = transform_point(&cameratoworld, ray->P);
-	float3 tD = transform_direction(&cameratoworld, ray->D);
-	ray->P = spherical_stereo_position(kg, tD, tP);
-	ray->D = spherical_stereo_direction(kg, tD, tP, ray->P);
+	ray->P = transform_point(&cameratoworld, ray->P);
+	ray->D = normalize(transform_direction(&cameratoworld, ray->D));
 
+	bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
+	if (!use_stereo) {
+		/* No stereo */
 #ifdef __RAY_DIFFERENTIALS__
-	/* ray differential */
-	ray->dP = differential3_zero();
-
-	float3 tD_diff = transform_direction(&cameratoworld, Pcamera);
-	float3 Pdiff = spherical_stereo_position(kg, tD_diff, Pcamera);
-	float3 Ddiff = spherical_stereo_direction(kg, tD_diff, Pcamera, Pdiff);
-
-	tP = transform_perspective(&rastertocamera,
-	                           make_float3(raster_x + 1.0f, raster_y, 0.0f));
-	tD = tD_diff + float4_to_float3(kernel_data.cam.dx);
-	Pcamera = spherical_stereo_position(kg, tD, tP);
-	ray->dD.dx = spherical_stereo_direction(kg, tD, tP, Pcamera) - Ddiff;
-	ray->dP.dx = Pcamera - Pdiff;
-
-	tP = transform_perspective(&rastertocamera,
-	                           make_float3(raster_x, raster_y + 1.0f, 0.0f));
-	tD = tD_diff + float4_to_float3(kernel_data.cam.dy);
-	Pcamera = spherical_stereo_position(kg, tD, tP);
-	ray->dD.dy = spherical_stereo_direction(kg, tD, tP, Pcamera) - Ddiff;
-	/* dP.dy is zero, since the omnidirectional panorama only shift the eyes horizontally */
+		float3 Dcenter = transform_direction(&cameratoworld, Pcamera);
+
+		ray->dP = differential3_zero();
+		ray->dD.dx = normalize(Dcenter + float4_to_float3(kernel_data.cam.dx)) - normalize(Dcenter);
+		ray->dD.dy = normalize(Dcenter + float4_to_float3(kernel_data.cam.dy)) - normalize(Dcenter);
+#endif
+	}
+	else {
+		/* Spherical stereo */
+		spherical_stereo_transform(kg, &ray->P, &ray->D);
+
+#ifdef __RAY_DIFFERENTIALS__
+		/* Ray differentials, computed from scratch using the raster coordinates
+		 * because we don't want to be affected by depth of field. We compute
+		 * ray origin and direction for the center and two neighbouring pixels
+		 * and simply take their differences. */
+		float3 Pnostereo = transform_point(&cameratoworld, make_float3(0.0f, 0.0f, 0.0f));
+
+		float3 Pcenter = Pnostereo;
+		float3 Dcenter = Pcamera;
+		Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
+		spherical_stereo_transform(kg, &Pcenter, &Dcenter);
+
+		float3 Px = Pnostereo;
+		float3 Dx = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
+		Dx = normalize(transform_direction(&cameratoworld, Dx));
+		spherical_stereo_transform(kg, &Px, &Dx);
+
+		ray->dP.dx = Px - Pcenter;
+		ray->dD.dx = Dx - Dcenter;
+
+		float3 Py = Pnostereo;
+		float3 Dy = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
+		Dy = normalize(transform_direction(&cameratoworld, Dy));
+		spherical_stereo_transform(kg, &Py, &Dy);
+
+		ray->dP.dy = Py - Pcenter;
+		ray->dD.dy = Dy - Dcenter;
 #endif
+	}
 
 #ifdef __CAMERA_CLIPPING__
 	/* clipping */
-	float3 Pclip = normalize(Pcamera);
-	float z_inv = 1.0f / Pclip.z;
-	ray->P += kernel_data.cam.nearclip*ray->D * z_inv;
+	float z_inv = 1.0f / normalize(Pcamera).z;
+	float nearclip = kernel_data.cam.nearclip * z_inv;
+	ray->P += nearclip * ray->D;
+	ray->dP.dx += nearclip * ray->dD.dx;
+	ray->dP.dy += nearclip * ray->dD.dy;
 	ray->t = kernel_data.cam.cliplength * z_inv;
 #else
 	ray->t = FLT_MAX;
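
In the non-stereo perspective branch above, the direction differentials are
differences of normalized directions (normalize(Dcenter + dx) -
normalize(Dcenter)), and when near clipping pushes the ray origin forward the
origin differentials are pushed by the same parametric distance along the
direction differentials. A small self-contained sketch of that arithmetic,
with a plain float3 and hypothetical helper names rather than the kernel
types:

    #include <cmath>

    struct float3 { float x, y, z; };

    static float3 operator+(const float3 &a, const float3 &b) { return {a.x + b.x, a.y + b.y, a.z + b.z}; }
    static float3 operator-(const float3 &a, const float3 &b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
    static float3 operator*(float s, const float3 &v) { return {s * v.x, s * v.y, s * v.z}; }

    static float3 normalize(const float3 &v)
    {
        float len = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
        return {v.x / len, v.y / len, v.z / len};
    }

    /* Direction differential of a perspective ray: difference between the
     * normalized direction through the neighbouring pixel and the normalized
     * direction through the centre pixel. */
    static float3 direction_differential(const float3 &Dcenter, const float3 &dx)
    {
        return normalize(Dcenter + dx) - normalize(Dcenter);
    }

    /* Moving the ray origin to the near clip plane shifts the whole ray
     * footprint, so the origin differentials advance along the direction
     * differentials by the same parametric distance. */
    static void advance_to_near_clip(float nearclip_t,
                                     float3 *P, float3 *dPdx, float3 *dPdy,
                                     const float3 &D, const float3 &dDdx, const float3 &dDdy)
    {
        *P    = *P    + nearclip_t * D;
        *dPdx = *dPdx + nearclip_t * dDdx;
        *dPdy = *dPdy + nearclip_t * dDdy;
    }
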
@@ -268,36 +290,57 @@ ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
 	}
 #endif
 
-	float3 tP = transform_point(&cameratoworld, ray->P);
-	float3 tD = transform_direction(&cameratoworld, ray->D);
-	ray->P = spherical_stereo_position(kg, tD, tP);
-	ray->D = spherical_stereo_direction(kg, tD, tP, ray->P);
+	ray->P = transform_point(&cameratoworld, ray->P);
+	ray->D = normalize(transform_direction(&cameratoworld, ray->D));
+
+	/* Stereo transform */
+	bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
+	if (use_stereo) {
+		spherical_stereo_transform(kg, &ray->P, &ray->D);
+	}
 
 #ifdef __RAY_DIFFERENTIALS__
-	/* ray differential */
-	ray->dP = differential3_zero();
-
-	tP = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
-	tD = transform_direction(&cameratoworld, panorama_to_direction(kg, tP.x, tP.y));
-	float3 Pdiff = spherical_stereo_position(kg, tD, tP);
-	float3 Ddiff = spherical_stereo_direction(kg, tD, tP, Pdiff);
-
-	tP = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
-	tD = transform_direction(&cameratoworld, panorama_to_direction(kg, tP.x, tP.y));
-	Pcamera = spherical_stereo_position(kg, tD, tP);
-	ray->dD.dx = spherical_stereo_direction(kg, tD, tP, Pcamera) - Ddiff;
-	ray->dP.dx = Pcamera - Pdiff;
-
-	tP = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
-	tD = transform_direction(&cameratoworld, panorama_to_direction(kg, tP.x, tP.y));
-	Pcamera = spherical_stereo_position(kg, tD, tP);
-	ray->dD.dy = spherical_stereo_direction(kg, tD, tP, Pcamera) - Ddiff;
-	/* dP.dy is zero, since the omnidirectional panorama only shift the eyes horizontally */
+	/* Ray differentials, computed from scratch using the raster coordinates
+	 * because we don't want to be affected by depth of field. We compute
+	 * ray origin and direction for the center and two neighbouring pixels
+	 * and simply take their differences. */
+	float3 Pcenter = Pcamera;
+	float3 Dcenter = panorama_to_direction(kg, Pcenter.x, Pcenter.y);
+	Pcenter = transform_point(&cameratoworld, Pcenter);
+	Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
+	if (use_stereo) {
+		spherical_stereo_transform(kg, &Pcenter, &Dcenter);
+	}
+
+	float3 Px = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
+	float3 Dx = panorama_to_direction(kg, Px.x, Px.y);
+	Px = transform_point(&cameratoworld, Px);
+	Dx = normalize(transform_direction(&cameratoworld, Dx));
+	if (use_stereo) {
+		spherical_stereo_transform(kg, &Px, &Dx);
+	}
+
+	ray->dP.dx = Px - Pcenter;
+	ray->dD.dx = Dx - Dcenter;
+
+	float3 Py = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
+	float3 Dy = panorama_to_direction(kg, Py.x, Py.y);
+	Py = transform_point(&cameratoworld, Py);
+	Dy = normalize(transform_direction(&cameratoworld, Dy));
+	if (use_stereo) {
+		spherical_stereo_transform(kg, &Py, &Dy);
+	}
+
+	ray->dP.dy = Py - Pcenter;
+	ray->dD.dy = Dy - Dcenter;
 #endif
 
 #ifdef __CAMERA_CLIPPING__
 	/* clipping */
-	ray->P += kernel_data.cam.nearclip*ray->D;
+	float nearclip = kernel_data.cam.nearclip;
+	ray->P += nearclip * ray->D;
+	ray->dP.dx += nearclip * ray->dD.dx;
+	ray->dP.dy += nearclip * ray->dD.dy;
 	ray->t = kernel_data.cam.cliplength;
 #else
 	ray->t = FLT_MAX;
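
For reference, the panorama path above converts the raster position into a
direction with panorama_to_direction() before the finite differences are
taken. A standard equirectangular mapping from normalized coordinates (u, v)
in [0,1]^2 to a unit direction looks roughly as follows; the real Cycles
function also covers fisheye and mirror-ball lenses and its conventions may
differ, so treat this purely as an illustrative assumption:

    #include <cmath>

    struct float3 { float x, y, z; };

    /* Equirectangular mapping: u spans the longitude range [-pi, pi] and
     * v spans the latitude range [-pi/2, pi/2].  Illustrative only. */
    static float3 equirectangular_to_direction_sketch(float u, float v)
    {
        const float pi = 3.14159265358979323846f;
        float phi   = pi * (1.0f - 2.0f * u);  /* longitude */
        float theta = pi * (v - 0.5f);         /* latitude */

        return {std::cos(theta) * std::cos(phi),
                std::cos(theta) * std::sin(phi),
                std::sin(theta)};
    }
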
diff --git a/intern/cycles/kernel/kernel_projection.h b/intern/cycles/kernel/kernel_projection.h
index 3437d83..ba714b6 100644
--- a/intern/cycles/kernel/kernel_projection.h
+++ b/intern/cycles/kernel/kernel_projection.h
@@ -224,24 +224,18 @@ ccl_device_inline float2 direction_to_panorama(KernelGlobals *kg, float3 dir)
 	}
 }
 
-ccl_device_inline float3 spherical_stereo_position(KernelGlobals *kg,
-                                                   float3 dir,
-                                                   float3 pos)
+ccl_device_inline void spherical_stereo_transform(KernelGlobals *kg, float3 *P, float3 *D)
 {
 	float interocular_offset = kernel_data.cam.interocular_offset;
 
 	/* Interocular offset of zero means either non stereo, or stereo without
-	 * spherical stereo.
-	 */
-	if(interocular_offset == 0.0f) {
-		return pos;
-	}
+	 * spherical stereo. */
+	kernel_assert(interocular_offset != 0.0f);
 
 	if(kernel_data.cam.pole_merge_angle_to > 0.0f) {
-		float3 normalized_direction = normalize(dir);
 		const float pole_merge_angle_from = kernel_data.cam.pole_merge_angle_from,
 		            pole_merge_angle_to = kernel_data.cam.pole_merge_angle_to;
-		float altitude = fabsf(safe_asinf(normalized_direction.z));
+		float altitude = fabsf(safe_asinf(D->z));
 		if(altitude > pole_merge_angle_to) {
 			interocular_offset = 0.0f;
 		}
@@ -253,32 +247,20 @@ ccl_device_inline float3 spherical_stereo_position(KernelGlobals *kg,
 	}
 
 	float3 up = make_float3(0.0f, 0.0f, 1.0f);
-	float3 side = normalize(cross(dir, up));
+	float3 side = normalize(cross(*D, up));
+	float3 stereo_offset = side * interocular_offset;
 
-	return pos + (side * interocular_offset);
-}
+	*P += stereo_offset;
 
-/* NOTE: Ensures direction is normalized. */
-ccl_device float3 spherical_stereo_direction(KernelGlobals *kg,
-                                             float3 dir,
-                                             float3 pos,
-                                             float3 newpos)
-{
+	/* Convergence distance is FLT_MAX in the case of parallel convergence mode,
+	 * no need to modify direction in this case either. */
 	const float convergence_distance = kernel_data.cam.convergence_distance;
-	const float3 normalized_dir = normalize(dir);
-	/* Interocular offset of zero means either no stereo, or stereo without
-	 * spherical stereo.
-	 * Convergence distance is FLT_MAX in the case of parallel convergence mode,
-	 * no need to mdify direction in this case either.
-	 */
-	if(kernel_data.cam.interocular_offset == 0.0f ||
-	   convergence_distance == FLT_MAX)
+
+	if(convergence_distance != FLT_MAX)
 	{
-		return normalized_dir;
+		float3 screen_offset = convergence_distance * (*D);
+		*D = normalize(screen_offset - stereo_offset);
 	}
-
-	float3 screenpos = pos + (normalized_dir * convergence_distance);
-	return normalize(screenpos - newpos);
 }
 
 CCL_NAMESPACE_END
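
The rewritten spherical_stereo_transform() above folds the old position and
direction helpers into a single in-place transform: the ray origin is shifted
sideways by the interocular offset, and unless the convergence mode is
parallel (convergence distance of FLT_MAX) the direction is re-aimed from the
shifted origin at the point the original ray reaches at the convergence
distance. A self-contained sketch of that math, leaving out the pole-merge
fade that the real function also applies near the zenith and nadir:

    #include <cfloat>
    #include <cmath>

    struct float3 { float x, y, z; };

    static float3 operator+(const float3 &a, const float3 &b) { return {a.x + b.x, a.y + b.y, a.z + b.z}; }
    static float3 operator-(const float3 &a, const float3 &b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
    static float3 operator*(const float3 &v, float s) { return {v.x * s, v.y * s, v.z * s}; }

    static float3 cross(const float3 &a, const float3 &b)
    {
        return {a.y * b.z - a.z * b.y,
                a.z * b.x - a.x * b.z,
                a.x * b.y - a.y * b.x};
    }

    static float3 normalize(const float3 &v)
    {
        float len = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
        return {v.x / len, v.y / len, v.z / len};
    }

    /* Shift the eye sideways (perpendicular to the view direction and the up
     * axis), then aim the ray from the shifted eye at the point the original
     * ray hits at the convergence distance. */
    static void spherical_stereo_transform_sketch(float interocular_offset,
                                                  float convergence_distance,
                                                  float3 *P, float3 *D)
    {
        const float3 up = {0.0f, 0.0f, 1.0f};
        float3 side = normalize(cross(*D, up));
        float3 stereo_offset = side * interocular_offset;

        *P = *P + stereo_offset;

        /* FLT_MAX signals parallel convergence: the direction stays as is. */
        if (convergence_distance != FLT_MAX) {
            float3 screen_offset = *D * convergence_distance;
            *D = normalize(screen_offset - stereo_offset);
        }
    }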



