This shader (code at the end) uses raymarching to render procedural geometry.
However, in the image (above), the cube in the background should be partially occluding the pink solid; it isn't, because of this:
struct fragmentOutput {
float4 color : SV_Target;
float zvalue : SV_Depth;
};
fragmentOutput frag(fragmentInput i) {
fragmentOutput o;
...
o.zvalue = IF(output[1] > 0, 0, 1);
}
However, I cannot for the life of me figure out how to correctly generate a depth value here, one that allows the raymarched solids to obscure (or be obscured by) the other geometry in the scene.
I know it's possible, because there's a working example here: https://github.com/i-saint/RaymarchingOnUnity5 (associated Japanese-language blog: http://i-saint.hatenablog.com/).
However, it's in Japanese and largely undocumented, as well as being extremely complex.
I'm looking for an extremely simplified version of the same thing to build on.
In the shader I'm currently using the fragment program line:
float2 output = march_raycast(i.worldpos, i.viewdir, _far, _step);
This maps an input point p on the quad near the camera (the quad this shader is attached to) into an output float2 (density, distance), where distance is the distance from the quad to the 'point' on the procedural surface (the distance comes back as 0 if the ray hits nothing).
The question is, how do I map that into a depth buffer in any useful way?
The complete shader is here, to use it, create a new scene with a sphere at 0,0,0 with a size of at least 50 and assign the shader to it:
Shader "Shaders/Raymarching/BasicMarch" {
Properties {
_sun ("Sun", Vector) = (0, 0, 0, 0)
_far ("Far Depth Value", Float) = 20
_edgeFuzz ("Edge fuzziness", Range(1, 20)) = 1.0
_lightStep ("Light step", Range(0.1, 5)) = 1.0
_step ("Raycast step", Range(0.1, 5)) = 1.0
_dark ("Dark value", Color) = (0, 0, 0, 0)
_light ("Light Value", Color) = (1, 1, 1, 1)
[Toggle] _debugDepth ("Display depth field", Float) = 0
[Toggle] _debugLight ("Display light field", Float) = 0
}
SubShader {
Tags {"Queue"="Transparent" "IgnoreProjector"="True" "RenderType"="Transparent"}
Blend SrcAlpha OneMinusSrcAlpha
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 3.0
#include "UnityCG.cginc"
#include "UnityLightingCommon.cginc" // for _LightColor0
#define IF(a, b, c) lerp(b, c, step((fixed) (a), 0));
uniform float _far;
uniform float _lightStep;
uniform float3 _sun;
uniform float4 _light;
uniform float4 _dark;
uniform float _debugDepth;
uniform float _debugLight;
uniform float _edgeFuzz;
uniform float _step;
/**
* A soft sphere centered at center_ with radius radius_
* @param center_ The center of the sphere
* @param radius_ The radius of the sphere
* @param point_ The point to check
*/
float geom_soft_sphere(float3 center_, float radius_, float3 point_) {
float rtn = distance(center_, point_);
return IF(rtn < radius_, (radius_ - rtn) / radius_ / _edgeFuzz, 0);
}
/**
* A rectoid centered at center_
* @param center_ The center of the cube
* @param halfsize_ The halfsize of the cube in each direction
* @param point_ The point to check
*/
float geom_rectoid(float3 center_, float3 halfsize_, float3 point_) {
float rtn = IF((point_[0] < (center_[0] - halfsize_[0])) || (point_[0] > (center_[0] + halfsize_[0])), 0, 1);
rtn = rtn * IF((point_[1] < (center_[1] - halfsize_[1])) || (point_[1] > (center_[1] + halfsize_[1])), 0, 1);
rtn = rtn * IF((point_[2] < (center_[2] - halfsize_[2])) || (point_[2] > (center_[2] + halfsize_[2])), 0, 1);
rtn = rtn * distance(point_, center_);
float radius = length(halfsize_);
return IF(rtn > 0, (radius - rtn) / radius / _edgeFuzz, 0);
}
/**
* Calculate procedural geometry.
* Returns 0 for empty space.
* @param point_ The point to sample
* @return The density of the procedural geometry at point_
*/
float march_geometry(float3 point_) {
return
geom_rectoid(float3(0, 0, 0), float3(7, 7, 7), point_) +
geom_soft_sphere(float3(10, 0, 0), 7, point_) +
geom_soft_sphere(float3(-10, 0, 0), 7, point_) +
geom_soft_sphere(float3(0, 0, 10), 7, point_) +
geom_soft_sphere(float3(0, 0, -10), 7, point_);
}
/** Return a randomish value to sample step with */
float rand(float3 seed) {
return frac(sin(dot(seed.xyz ,float3(12.9898,78.233,45.5432))) * 43758.5453);
}
/**
* March the point p along the cast path c, and return a float2
* which is (density, depth); if the density is 0 no match was
* found in the given depth domain.
* @param point_ The origin point
* @param cast_ The cast vector
* @param max_ The maximum depth to step to
* @param step_ The increment to step in
* @return (density, depth)
*/
float2 march_raycast(float3 point_, float3 cast_, float max_, float step_) {
float3 origin_ = point_; // ray origin (currently unused)
float depth_ = 0;
float density_ = 0;
int steps = floor(max_ / step_);
for (int i = 0; (density_ <= 1) && (i < steps); ++i) {
float3 target_ = point_ + cast_ * i * step_ + rand(point_) * cast_ * step_;
density_ += march_geometry(target_);
depth_ = IF((depth_ == 0) && (density_ != 0), distance(point_, target_), depth_);
}
density_ = IF(density_ > 1, 1, density_);
return float2(density_, depth_);
}
/**
* Simple lighting; raycast from depth point to light source, and get density on path
* @param point_ The origin point on the render target
* @param cast_ The original cast (ie. camera view direction)
* @param raycast_ The result of the original raycast
* @param max_ The max distance to cast
* @param step_ The step increment
*/
float2 march_lighting(float3 point_, float3 cast_, float2 raycast_, float max_, float step_) {
float3 target_ = point_ + cast_ * raycast_[1];
float3 lcast_ = normalize(_sun - target_);
return march_raycast(target_, lcast_, max_, _lightStep);
}
struct fragmentInput {
float4 position : SV_POSITION;
float4 worldpos : TEXCOORD0;
float3 viewdir : TEXCOORD1;
};
struct fragmentOutput {
float4 color : SV_Target;
float zvalue : SV_Depth;
};
fragmentInput vert(appdata_base i) {
fragmentInput o;
o.position = mul(UNITY_MATRIX_MVP, i.vertex);
o.worldpos = mul(_Object2World, i.vertex);
o.viewdir = -normalize(WorldSpaceViewDir(i.vertex));
return o;
}
fragmentOutput frag(fragmentInput i) {
fragmentOutput o;
// Raycast
float2 output = march_raycast(i.worldpos, i.viewdir, _far, _step);
float2 light = march_lighting(i.worldpos, i.viewdir, output, _far, _step);
float lvalue = 1.0 - light[0];
float depth = output[1] / _far;
// Generate fragment color
float4 color = lerp(_light, _dark, lvalue);
// Debugging: Depth
float4 debug_depth = float4(depth, depth, depth, 1);
color = IF(_debugDepth, debug_depth, color);
// Debugging: Color
float4 debug_light = float4(lvalue, lvalue, lvalue, 1);
color = IF(_debugLight, debug_light, color);
// Always apply the depth map
color.a = output[0];
o.zvalue = IF(output[1] > 0, 0, 1);
o.color = IF(output[1] <= 0, 0, color);
return o;
}
ENDCG
}
}
}
(Yes, I know it's quite complex, but it's very difficult to reduce this kind of shader into a 'simple test case' to play with)
I'll happily accept any answer which is a modification of the shader above that allows the procedural solid to obscure, or be obscured by, other geometry in the scene as though it were 'real geometry'.
--
Edit: You can get this 'working' by explicitly setting the depth value on the other geometry in the scene using the same depth function as the raymarcher:
...however, I still cannot get this to work correctly with geometry using the 'standard' shader. Still hunting for a working solution...
The depth buffer is automatically created by the windowing system and stores its depth values as 16-, 24- or 32-bit floats. In most systems you'll see a depth buffer with a precision of 24 bits. When depth testing is enabled, OpenGL tests the depth value of a fragment against the content of the depth buffer.
In a 3d-rendering pipeline, when an object is projected on the screen, the depth (z-value) of a generated fragment in the projected screen image is compared to the value already stored in the buffer (depth test), and replaces it if the new value is closer.
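Writing to SV_Depth from the fragment shader simply substitutes your own value into that comparison in place of the rasterizer's interpolated z. Conceptually, the per-fragment test the GPU performs looks something like this (pseudocode for illustration only, not something you write in the shader):
// Conceptual sketch of the fixed-function depth test (done by the GPU, not in shader code).
// fragment_depth is the interpolated z/w of the triangle, unless the fragment shader
// wrote its own value to SV_Depth, in which case that value is used instead.
if (fragment_depth <= depth_buffer[pixel]) {
    color_buffer[pixel] = fragment_color; // closer (or equal): this fragment wins
    depth_buffer[pixel] = fragment_depth;
}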
Looking at the project you linked to, the most important difference I see is that their raycast march function uses a pass-by-reference parameter to return a fragment position called ray_pos. That position appears to be in object space, so they transform it using the view-projection matrix to get clip space and read a depth value.
The project also has a compute_depth function, but it looks pretty simple.
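For reference, a helper along these lines does the job (a sketch of the general idea, not the project's actual code): after the perspective divide, D3D-style clip space already gives a 0..1 depth, while OpenGL-style clip space gives -1..1 and needs remapping before it can be written to SV_Depth.
// Sketch of a compute_depth-style helper (assumed form, not copied from the linked project).
float compute_depth(float4 clip_pos) {
#if defined(SHADER_TARGET_GLSL) || defined(SHADER_API_GLES) || defined(SHADER_API_GLES3)
    return (clip_pos.z / clip_pos.w) * 0.5 + 0.5; // GL-style clip space: remap -1..1 to 0..1
#else
    return clip_pos.z / clip_pos.w;               // D3D-style clip space: already 0..1
#endif
}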
Your march_raycast function is already calculating a target_ position, so you could refactor a bit, apply the out keyword to return it to the caller, and use it in depth calculations:
//get position using pass-by-ref
float3 ray_pos = i.worldpos;
float2 output = march_raycast(ray_pos, i.viewdir, _far, _step);
...
//convert position to clip space, read depth
float4 clip_pos = mul(UNITY_MATRIX_VP, float4(ray_pos, 1.0));
o.zvalue = clip_pos.z / clip_pos.w;
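If it helps, here is a minimal sketch of that refactor, assuming you simply make the existing origin parameter inout and write the first occupied sample position back through it (names and structure otherwise kept from your shader):
// Sketch only: march_raycast refactored so point_ is inout and comes back holding the
// world-space position of the first sample whose density is non-zero.
float2 march_raycast(inout float3 point_, float3 cast_, float max_, float step_) {
    float3 origin_ = point_;
    float depth_ = 0;
    float density_ = 0;
    int steps = floor(max_ / step_);
    for (int i = 0; (density_ <= 1) && (i < steps); ++i) {
        float3 target_ = origin_ + cast_ * i * step_ + rand(origin_) * cast_ * step_;
        density_ += march_geometry(target_);
        // Update point_ before depth_, so both lines still see depth_ == 0 on the
        // iteration where the surface is first hit.
        point_ = IF((depth_ == 0) && (density_ != 0), target_, point_);
        depth_ = IF((depth_ == 0) && (density_ != 0), distance(origin_, target_), depth_);
    }
    density_ = IF(density_ > 1, 1, density_);
    return float2(density_, depth_);
}
For fragments where the ray hits nothing, point_ stays at the quad position, so you probably still want to keep your original miss handling and only use the computed value on a hit, e.g. o.zvalue = IF(output[1] > 0, clip_pos.z / clip_pos.w, 1); that way empty raymarch fragments keep writing the far plane rather than the depth of the quad itself.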