我正在尝试使用着色器创建一个简单的次表面散射(subsurface scattering)效果,但是我遇到了一个小问题。
请查看这些屏幕截图。这三张图像代表三种光照状态(光源在表面上方、非常接近表面、位于表面之下),使用了多种光照颜色(红色和蓝色),并且次表面颜色始终相同(红色)。
您可能会注意到,当光源在表面上方并且非常接近表面时,其影响被减到最小——这是预期的行为。问题在于次表面部分的表现与此完全相同:按照我的着色器代码,这是正常现象,但我认为当光源接近表面时,次表面光照的影响应该更强。建议您查看屏幕截图以了解预期的结果。
我怎样才能做到这一点 ?
这是简化的着色器代码。
// Direct lighting: standard clamped Lambert term.
half ndotl = max(0.0f, dot(normalWorld, lightDir));
// Inverted Lambert term: light arriving from behind the surface.
half inversendotl = max(0.0f, dot(normalWorld, -lightDir));
half3 lightColor = _LightColor0.rgb * ndotl; // This is the normal light color calculation
half3 subsurfacecolor = translucencyColor.rgb * inversendotl; // This is the subsurface color
half3 topsubsurfacecolor = translucencyColor.rgb; // This is used for adding subsurface color to top surface
// FIX: the original read "subsurfacescolor" (stray 's'), which is an
// undeclared identifier; it must match "subsurfacecolor" declared above.
final = subsurfacecolor + lerp(lightColor, topsubsurfacecolor * 0.5, 1 - ndotl - inversendotl);
这种实现次表面散射效果的方式非常粗糙,用如此简单的方法很难获得良好的结果。不过沿用您选定的思路,我建议您做以下修改:
根据平方反比定律考虑光源的距离。这同时适用于直接光照和次表面散射两个分量。
一旦光源位于表面之后,最好忽略内表面法线与光线方向的点积,因为您无法预知光线将如何在物体内部传播。另一个原因是,由于折射定律(假设物体的折射率高于空气),该点积的影响会变小。因此,当光源位于表面之后时,您可以直接使用 step 函数来开启次表面分量。
因此,着色器的修改后的版本如下所示:
half3 toLightVector = u_lightPos - v_fragmentPos;
// Squared distance gives inverse-square attenuation without a sqrt.
half lightDistanceSQ = dot(toLightVector, toLightVector);
half3 lightDir = normalize(toLightVector);
// Direct lighting: standard clamped Lambert term.
half ndotl = max(0.0, dot(v_normal, lightDir));
// Subsurface switch: hard on/off (1 once the light is at or behind the
// surface) instead of scaling by the inverse dot product.
half inversendotl = step(0.0, dot(v_normal, -lightDir));
half3 lightColor = _LightColor0.rgb * ndotl / lightDistanceSQ * _LightIntensity0;
half3 subsurfacecolor = translucencyColor.rgb * inversendotl / lightDistanceSQ * _LightIntensity0;
half3 final = subsurfacecolor + lightColor;
其中 u_lightPos 是包含光源位置的 uniform(统一变量),v_fragmentPos 是包含片段位置的 varying(插值变量)。
这是使用three.js的glsl示例:
// Scene state shared by init()/animate()/render() (assigned in init()).
let container;
let camera;
let scene;
let renderer;
let sssMesh;          // cube shaded by the fake-SSS ShaderMaterial
let lightSourceMesh;  // small cube that visualizes the light position
let sssUniforms;      // uniforms handed to the SSS ShaderMaterial
const clock = new THREE.Clock();

init();
animate();
/**
 * Builds the demo scene: a perspective camera, the SSS-shaded cube, a small
 * cube visualizing the light position, and the WebGL renderer.
 */
function init() {
  container = document.getElementById('container');

  camera = new THREE.PerspectiveCamera(40, window.innerWidth / window.innerHeight, 1, 3000);
  camera.position.z = 4;
  camera.position.y = 2;
  camera.rotation.x = -0.45;

  scene = new THREE.Scene();

  // FIX: THREE.CubeGeometry was a deprecated alias removed from current
  // three.js builds (which this page loads); BoxGeometry is the supported name.
  const boxGeometry = new THREE.BoxGeometry(0.75, 0.75, 0.75);
  const lightSourceGeometry = new THREE.BoxGeometry(0.1, 0.1, 0.1);

  sssUniforms = {
    u_lightPos: {
      type: "v3",
      value: new THREE.Vector3()
    }
  };

  const sssMaterial = new THREE.ShaderMaterial({
    uniforms: sssUniforms,
    vertexShader: document.getElementById('vertexShader').textContent,
    fragmentShader: document.getElementById('fragment_shader').textContent
  });

  const lightSourceMaterial = new THREE.MeshBasicMaterial();

  sssMesh = new THREE.Mesh(boxGeometry, sssMaterial);
  sssMesh.position.x = 0;
  sssMesh.position.y = 0;
  scene.add(sssMesh);

  lightSourceMesh = new THREE.Mesh(lightSourceGeometry, lightSourceMaterial);
  lightSourceMesh.position.x = 0;
  lightSourceMesh.position.y = 0;
  scene.add(lightSourceMesh);

  renderer = new THREE.WebGLRenderer();
  container.appendChild(renderer.domElement);

  // Size the renderer once now, then keep it in sync with the window.
  onWindowResize();
  window.addEventListener('resize', onWindowResize, false);
}
// Keeps the camera's aspect ratio and the renderer's drawing-buffer size in
// sync with the current window dimensions.
function onWindowResize(event) {
  const width = window.innerWidth;
  const height = window.innerHeight;
  camera.aspect = width / height;
  camera.updateProjectionMatrix();
  renderer.setSize(width, height);
}
// Animation loop: reschedules itself via requestAnimationFrame, then renders.
function animate() {
requestAnimationFrame(animate);
render();
}
// Per-frame update: bobs the light up and down on a sine wave, mirrors its
// height into the shader uniform, spins the cube by the frame time, and draws.
function render() {
  // getDelta() also advances clock.elapsedTime, which drives the sine below.
  const delta = clock.getDelta();
  const lightHeight = Math.sin(clock.elapsedTime * 1.0) * 0.5 + 0.7;
  lightSourceMesh.position.y = lightHeight;
  sssUniforms.u_lightPos.value.y = lightHeight;
  sssMesh.rotation.y += delta * 0.5;
  renderer.render(scene, camera);
}
/* Full-viewport dark page: zero margin and hidden overflow so the WebGL
   canvas fills the window with no scrollbars. */
body {
color: #ffffff;
background-color: #050505;
margin: 0px;
overflow: hidden;
}
<script src="http://threejs.org/build/three.min.js"></script>
<div id="container"></div>
<script id="fragment_shader" type="x-shader/x-fragment">
varying vec3 v_fragmentPos;   // world-space fragment position (from vertex shader)
varying vec3 v_normal;        // world-space normal (interpolated; not renormalized here)
uniform vec3 u_lightPos;      // world-space light position
void main(void)
{
// Hard-coded light/material constants for the demo.
vec3 _LightColor0 = vec3(1.0,0.5,0.5);
float _LightIntensity0 = 0.2;
vec3 translucencyColor = vec3(0.8,0.2,0.2);
vec3 toLightVector = u_lightPos - v_fragmentPos;
// Squared distance: inverse-square falloff without a sqrt.
float lightDistanceSQ = dot(toLightVector, toLightVector);
vec3 lightDir = normalize(toLightVector);
// Direct term: standard clamped Lambert.
float ndotl = max(0.0, dot(v_normal, lightDir));
// Subsurface term: step() turns the contribution fully on once the light
// direction is at or behind the surface, rather than scaling by the dot.
float inversendotl = step(0.0, dot(v_normal, -lightDir));
vec3 lightColor = _LightColor0.rgb * ndotl / lightDistanceSQ * _LightIntensity0;
vec3 subsurfacecolor = translucencyColor.rgb * inversendotl / lightDistanceSQ * _LightIntensity0;
vec3 final = subsurfacecolor + lightColor;
gl_FragColor=vec4(final,1.0);
}
</script>
<script id="vertexShader" type="x-shader/x-vertex">
varying vec3 v_fragmentPos;
varying vec3 v_normal;
void main()
{
vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
// World-space position for the fragment shader's distance/direction math.
v_fragmentPos = (modelMatrix * vec4( position, 1.0 )).xyz;
// Normal transformed by the full model matrix (w = 0 drops the translation).
// NOTE(review): this is only correct for rotations/uniform scale; a
// non-uniform scale would require the inverse-transpose (normalMatrix).
v_normal = (modelMatrix * vec4( normal, 0.0 )).xyz;
gl_Position = projectionMatrix * mvPosition;
}
</script>
有许多不同的SSS仿真技术。纹理空间扩散和基于阴影贴图的半透明是最常用的技术。
请查看 GPU Gems 中的这篇文章,它描述了上述技术。此外,EA 的这份演示文稿也很有意思,其中提到了一种与其渲染植被非常接近的方法。
球谐函数也适用于静态几何体,但这种方法相当复杂,并且需要预先计算的辐照度传输。请查看这篇文章,它展示了如何使用球谐函数来近似 SSS。
本文收集自互联网,转载请注明来源。
如有侵权,请联系 [email protected] 删除。
我来说两句