Hi guys,
just wanted to share the code I've used to study the position reconstruction problem.
This code lets you switch easily between linear depth and post-projection depth, and I'll also show a way to check that the reconstruction is correct.
Below is the code to convert between camera/view space and post-projection space, and vice versa.
// This must be done on the CPU and passed to shaders:
float2 getProjParams()
{
//#define PROJ_STANDARD
#ifdef PROJ_STANDARD
float rangeInv = 1 / (gFar - gNear);
float A = -(gFar + gNear) * rangeInv;
float B = -2 * gFar * gNear * rangeInv;
#else // We get rid of the minus by just inverting the denominator (faster):
float rangeInv = 1 / (gNear - gFar);
float A = (gFar + gNear) * rangeInv;
float B = 2 * gFar * gNear * rangeInv;
#endif
return float2(A, B);
}
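Just to recap where A and B come from (assuming the usual OpenGL-style projection matrix with -1..1 NDC depth and column vectors, matching the mul() calls below):
// The z and w rows of the projection matrix are:
//   z_clip = A * Ze + B        ( row 3:  0  0  A  B )
//   w_clip = -Ze               ( row 4:  0  0 -1  0 )
// so after the perspective divide:
//   Zn = z_clip / w_clip = (A * Ze + B) / -Ze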
// Input: 0..-far - Output: -1..1
float postDepthFromViewDepth( float depthVS )
{
float2 projParams = getProjParams();
// Zn = (A * Ze + B) / -Ze
// Zn = -A - (B/Ze)
float depthPS = -projParams.x - (projParams.y / depthVS);
return depthPS;
}
// Input: -1..1 - Output: 0..-far
float viewDepthFromPostDepth( float depthPS )
{
float2 projParams = getProjParams();
// Ze = -B / (Zn + A)
float depthVS = -projParams.y / (projParams.x + depthPS);
return depthVS;
}
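As a quick sanity check (debug-only, just something I use; the tolerances are arbitrary, and depthVS must be in the valid -near..-far range), the two functions must invert each other and hit the NDC endpoints exactly:
// Returns 1 where the conversions behave, 0 where something is off
float checkDepthConversion( float depthVS )
{
	// Endpoints: Ze = -near must map to Zn = -1, Ze = -far to Zn = 1
	float nearOk = abs( postDepthFromViewDepth( -gNear ) + 1 ) < 0.001;
	float farOk  = abs( postDepthFromViewDepth( -gFar ) - 1 ) < 0.001;
	// Round trip: view -> post -> view must return the input
	float backVS = viewDepthFromPostDepth( postDepthFromViewDepth( depthVS ) );
	float tripOk = abs( backVS - depthVS ) < 0.001 * abs( depthVS );
	return nearOk * farOk * tripOk;
}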
Next I'll show some helper functions to encode/decode the depth in different spaces.
///////////////////////////////////////////////////
// POST PROJECTION SPACE
///////////////////////////////////////////////////
// Returns post-projection depth
float decodeProjDepth( float2 uv )
{
return tex2D( depthMap, uv ).r;
}
// Returns viewspace depth from projection (negative for left-handed)
float decodeViewDepthFromProjection( float2 uv )
{
float depthPS = decodeProjDepth( uv );
return viewDepthFromPostDepth( depthPS );
}
// Returns depth in range 0..1
float decodeLinearDepthFromProjection( float2 uv )
{
float depthVS = decodeViewDepthFromProjection( uv );
// Left-handed coords need the minus,
// because the depth is negative
// and we are converting to the 0..1 domain
return -depthVS / gFar;
}
// Returns post-projection depth
float encodePostProjectionDepth( float depthViewSpace )
{
return postDepthFromViewDepth( depthViewSpace );
}
///////////////////////////////////////////////////
// VIEW/CAMERA SPACE
///////////////////////////////////////////////////
// Returns stored linear depth (0..1)
float decodeLinearDepthRaw( float2 uv )
{
return tex2D( depthMap, uv ).r;
}
// Returns viewspace depth (0..-far)
float decodeViewDepthFromLinear( float2 uv )
{
return decodeLinearDepthRaw( uv ) * -gFar;
}
// Returns linear depth from left-handed viewspace depth (0..-far)
float encodeDepthLinear( float depthViewSpace )
{
return -depthViewSpace / gFar;
}
With simple defines we can switch between using a linear-depth or a post-projection (raw) depth buffer, and check that our calculations hold in both cases:
#ifdef DEPTH_LINEAR
#define encodeDepth encodeDepthLinear
#define decodeViewDepth decodeViewDepthFromLinear
#define decodeLinearDepth decodeLinearDepthRaw
#else
#define encodeDepth encodePostProjectionDepth
#define decodeViewDepth decodeViewDepthFromProjection
#define decodeLinearDepth decodeLinearDepthFromProjection
#endif
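A trivial way to verify the switch (my own debug idea, assuming a standard fullscreen pass over the GBuffer): output the decoded linear depth as grayscale, and the image must look identical no matter which path is compiled in:
// Debug pixel shader: linear depth as grayscale,
// must look the same with and without DEPTH_LINEAR
float4 debugDepthPS( float2 uv : TEXCOORD0 ) : COLOR0
{
	float depthLin = decodeLinearDepth( uv );	// 0..1 in both paths
	return float4( depthLin.xxx, 1 );
}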
Now all the reconstruction methods, both the slow ones and the ones that use a precomputed ray:
// View-space position
float3 getPositionVS( float2 uv )
{
	float depthLin = decodeLinearDepth(uv);	// 0..1
	//float4 positionPS = float4((uv.x-0.5) * 2, (0.5-uv.y) * 2, 1, 1);
	float4 positionPS = float4( (uv - 0.5) * float2(2, -2), 1, 1 );
	// Unproject the far-plane point (z = 1 in NDC) back to view space
	float4 ray = mul( gProjI, positionPS );
	ray.xyz /= ray.w;
	// ray is now the view-space position on the far plane, so scaling it
	// by the 0..1 linear depth lands on the original point (similar triangles)
	return ray.xyz * depthLin;
}
// ray must be the interpolated view-space position of the pixel on the
// far plane (see the vertex shader sketch near the end of the post)
float3 getPositionVS( float2 uv, float3 ray )
{
	float depthLin = decodeLinearDepth(uv);
	return ray * depthLin;
}
float3 getPositionWS( float2 uv )
{
float3 positionVS = getPositionVS( uv );
float4 positionWS = mul( gViewI, float4(positionVS, 1) );
return positionWS.xyz;
}
float3 getPositionWS( float2 uv, float3 viewDirectionWS )
{
#if defined(METHOD1)
	// Super-slow method ( 2 extra matrix-vector mul )
	float4 pps = mul( gProj, float4(getPositionVS( uv ), 1) );
	float4 positionWS = mul( gViewProjI, pps );
	positionWS /= positionWS.w;
	return positionWS.xyz;
#elif defined(METHOD2)
	// Known working slow method
	float3 positionWS = getPositionWS( uv );
	return positionWS;
#else
	// Super fast method
	float depthVS = decodeViewDepth(uv);	// 0..-far
	viewDirectionWS = normalize(viewDirectionWS);
	// World-space direction of the camera z axis (third row of the view matrix)
	float3 zAxis = gView._31_32_33;
	// Rescale the ray so that its projection onto the camera z axis
	// has exactly the length of the view-space depth
	float zScale = dot( zAxis, viewDirectionWS );
	float3 positionWS = getCameraPosition() + viewDirectionWS * depthVS / zScale;
	return positionWS;
#endif // METHOD1,2,3
}
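Another handy check (again debug-only, a sketch of mine; compile it without METHOD1/METHOD2 so the fast path is active): reconstruct the same pixel with the fast ray method and the known-working matrix method, and visualize the amplified difference, which must stay black everywhere:
// Debug: difference between ray-based and matrix-based reconstruction
float4 debugReconstructionPS( float2 uv, float3 viewDirWS )
{
	float3 fastWS = getPositionWS( uv, viewDirWS );	// fast ray method
	float3 slowWS = getPositionWS( uv );	// matrix unprojection
	return float4( abs(fastWS - slowWS) * 10, 1 );	// amplified x10
}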
Last but not least, here's the code to encode the depth, plus a simple test to check that we've done everything right:
// This is only for educational purposes,
// so that we can switch between storing
// viewspace or postprojection depth.
// In the GBuffer creation pass, the depth is stored like this:
depth = float4( encodeDepth(IN.positionViewSpace.z), 1, 1, 1);
// A very cheap and easy way to check that
// we've done everything correctly is to add a
// point light and brighten the rendering
// a bit with something like:
#ifdef POSITION_RECONSTRUCTION_VIEWSPACE
float4 lightPos = mul( gView, float4(100.0, 0.0, 0.0, 1.0) );
float3 pixelPos = getPositionVS( uv, viewDirVS );
#else
float4 lightPos = float4(100.0, 0.0, 0.0, 1.0);
float3 pixelPos = getPositionWS( uv, viewDirWS );
#endif
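and then accumulate a cheap light contribution (a minimal sketch: the 200 falloff radius is arbitrary, and color stands for whatever your lighting pass outputs):
// Cheap point light: brightens pixels near lightPos in either space
float lightDist = distance( pixelPos, lightPos.xyz );
float attenuation = saturate( 1.0 - lightDist / 200.0 );
color.rgb += attenuation * 0.5;	// color = output of the lighting pass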
Note the two different rays, viewDirVS and viewDirWS. They are calculated as MJP showed a while ago, in two different ways, one for meshes and one for fullscreen quads. I think that's all for now; I'll attach a screenshot of the simple test I've used to check the reconstruction. Note that the light is the same in all configurations: view/world-space reconstruction, linear/post-projection depth storage.
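For completeness, here's roughly how the fullscreen-quad rays can be computed in the vertex shader, following MJP's approach (just a sketch, your vertex inputs and constant names may differ):
// Fullscreen-quad VS sketch: position.xy is the quad corner in -1..1
void fullscreenVS( float4 position : POSITION,
                   out float4 positionCS : POSITION,
                   out float2 uv : TEXCOORD0,
                   out float3 viewDirVS : TEXCOORD1,
                   out float3 viewDirWS : TEXCOORD2 )
{
	positionCS = float4( position.xy, 1, 1 );	// on the far plane
	uv = position.xy * float2(0.5, -0.5) + 0.5;
	// View-space position of this corner on the far plane:
	// this is the ray expected by getPositionVS( uv, ray )
	float4 farVS = mul( gProjI, positionCS );
	viewDirVS = farVS.xyz / farVS.w;
	// Same ray rotated to world space (w = 0: a direction, not a point);
	// fine for getPositionWS, since the fast method renormalizes it
	viewDirWS = mul( gViewI, float4(viewDirVS, 0) ).xyz;
}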
Enjoy!!!