• 16. Specular IBL -- Prefiltered Environment Map


Time for the fun part: the math.
Start with the rendering equation:
[Figure: the rendering equation]
Now focus on the second term, the specular reflection:
[Figure: the specular part of the rendering equation]
where
[Figure: the Cook-Torrance specular BRDF]
This part is tricky: the integral depends on both the incoming direction wi and the outgoing direction wo, so we approximate it further:
[Figure: the split-sum approximation]
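Written out in LaTeX (this is the standard split-sum form from Epic Games, transcribed here for convenience; it is presumably what the figure shows, since the text below calls the first factor the prefiltered environment map):

L_o(p, \omega_o) \approx \left( \int_\Omega L_i(p, \omega_i)\, d\omega_i \right) \cdot \left( \int_\Omega f_r(p, \omega_i, \omega_o)\,(n \cdot \omega_i)\, d\omega_i \right)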
The first part of this approximation is the prefiltered environment map. In form it is very similar to the irradiance map from before, so can we use the same approach?
First compare what specular and diffuse reflection look like:
[Figure: specular lobe vs. diffuse reflection]
Specular reflection is concentrated in a region around the reflection vector (called a lobe), while diffuse reflection is distributed uniformly around the normal.
Recall that when we integrated the irradiance map, the integration was centered on the normal vector. [Figure: integration centered on the normal]
It is natural, then, to integrate the prefiltered environment map for specular reflection centered on the reflection vector, over the extent of the lobe.

However, lobes come in different sizes because roughness differs:
[Figure: specular lobes at different roughness values]
So we cannot integrate just once; we integrate several times, once per roughness value, and write the results either into mipmap levels or into separate textures. For convenience, here each level is written to its own texture.

How should we integrate, then? For the irradiance map we integrated uniformly over longitude 0 to 360 degrees and latitude 0 to 90 degrees:
[Figure: uniform hemisphere integration for the irradiance map]

For specular reflection, given an incoming direction, the lobe points along the reflection about the microfacet half vector, so it is enough to integrate only within the lobe, i.e., importance sampling.
For this we can use Monte Carlo integration: relying on the law of large numbers, we take N samples; the larger N, the more accurate the estimate. pdf is the probability density function.
[Figure: the Monte Carlo estimator]
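In LaTeX form, the standard estimator (written out here for convenience, since the figure is not reproduced) is:

\int f(x)\, dx \approx \frac{1}{N} \sum_{i=0}^{N-1} \frac{f(x_i)}{\mathrm{pdf}(x_i)}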
For example:
[Figure: uniform sampling example]
The more samples you take, the closer the estimate gets to the middle of the range, because the middle of the range has the highest probability.

The above is uniform sampling.
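As a toy illustration (my own minimal C++ sketch, not from the original article), here is uniform Monte Carlo estimation of a simple 1D integral; for uniform samples on [a, b] the pdf is the constant 1/(b - a):

#include <cstdio>
#include <random>

// Estimate the integral of f(x) = x*x over [0, 3] (exact value: 9)
// with uniform Monte Carlo sampling.
int main()
{
    const double a = 0.0, b = 3.0;
    const int N = 100000;

    std::mt19937 rng(42);
    std::uniform_real_distribution<double> dist(a, b);

    double sum = 0.0;
    for (int i = 0; i < N; ++i)
    {
        double x = dist(rng);
        double f = x * x;
        double pdf = 1.0 / (b - a); // uniform pdf
        sum += f / pdf;             // accumulate f(x) / pdf(x)
    }
    std::printf("estimate = %f (exact = 9)\n", sum / N);
    return 0;
}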

If the samples are biased toward being evenly spread (quasi-random rather than purely random), the estimate converges faster; for example, the samples can be drawn from a low-discrepancy sequence:
// Van der Corput radical inverse in base 2, computed by reversing the bits of i.
float RadicalInverse_VdC(uint bits)
{
    bits = (bits << 16u) | (bits >> 16u);
    bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
    bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
    bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
    bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
    return float(bits) * 2.3283064365386963e-10; // / 0x100000000
}

// The i-th point of an N-point Hammersley sequence in [0,1)^2.
vec2 Hammersley(uint i, uint N)
{
    return vec2(float(i) / float(N), RadicalInverse_VdC(i));
}
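If you want to sanity-check the sequence outside the shader, a small CPU-side C++ version of the same idea could look like this (my own sketch, not part of the original code):

#include <cstdint>
#include <cstdio>

// CPU-side version of RadicalInverse_VdC: reverse the 32 bits of i and
// interpret the result as a fraction in [0, 1).
static float radicalInverseVdC(uint32_t bits)
{
    bits = (bits << 16u) | (bits >> 16u);
    bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
    bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
    bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
    bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
    return float(bits) * 2.3283064365386963e-10f; // == bits / 2^32
}

int main()
{
    const uint32_t N = 8;
    for (uint32_t i = 0; i < N; ++i)
        std::printf("Hammersley(%u, %u) = (%f, %f)\n",
                    i, N, float(i) / float(N), radicalInverseVdC(i));
    return 0;
}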
Or a version without bit operations:
    "float VanDerCorpus(uint n, uint base) "
    "{ "
    " float invBase = 1.0 / float(base); "
    " float denom = 1.0; "
    " float result = 0.0; "
    " for (uint i = 0u; i < 32u; ++i) "
    " { "
    " if (n > 0u) "
    " { "
    " denom = mod(float(n), 2.0); "
    " result += denom * invBase; "
    " invBase = invBase / 2.0; "
    " n = uint(float(n) / 2.0); "
    " } "
    " } "
    "return result; "
    "} "
    " "
    "vec2 HammersleyNoBitOps(uint i, uint N) "
    "{ "
    " return vec2(float(i) / float(N), VanDerCorpus(i, 2u)); "
    "} "

Then, from the normal direction, the roughness, and the low-discrepancy sequence, generate a sample vector that lies roughly around the expected lobe direction:
// Importance-sample the GGX NDF: map the 2D low-discrepancy point Xi to a
// half vector H around the normal N, concentrated according to roughness.
vec3 ImportanceSampleGGX(vec2 Xi, vec3 N, float roughness)
{
    float a = roughness * roughness;

    // Spherical coordinates of H in tangent space, distributed by the GGX NDF.
    float phi = 2.0 * PI * Xi.x;
    float cosTheta = sqrt((1.0 - Xi.y) / (1.0 + (a * a - 1.0) * Xi.y));
    float sinTheta = sqrt(1.0 - cosTheta * cosTheta);

    vec3 H;
    H.x = cos(phi) * sinTheta;
    H.y = sin(phi) * sinTheta;
    H.z = cosTheta;

    // Build a tangent frame around N and transform H to world space.
    vec3 up = abs(N.z) < 0.999 ? vec3(0.0, 0.0, 1.0) : vec3(1.0, 0.0, 0.0);
    vec3 tangent = normalize(cross(up, N));
    vec3 bitangent = cross(N, tangent);

    vec3 sampleVec = tangent * H.x + bitangent * H.y + N * H.z;
    return normalize(sampleVec);
}
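In equation form, the phi/cosTheta lines above are the usual inverse-transform sampling of the GGX normal distribution, with alpha equal to roughness squared:

\phi = 2\pi\,\xi_1, \qquad \cos\theta = \sqrt{\frac{1 - \xi_2}{1 + (\alpha^2 - 1)\,\xi_2}}, \qquad \alpha = \text{roughness}^2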

Because the specular lobe grows with roughness, we can assign one roughness value per mipmap level (here, per output texture):
    
float minMipMapLevel = 0.0;
float maxMipMapLevel = 4.0;

float theMip = 3.0; // 0, 1, 2, 3, 4
float roughness = theMip / maxMipMapLevel;

int textureSize = static_cast<int>(128 * std::pow(0.5, theMip));

osg::ref_ptr<osg::TextureCubeMap> tcm = new osg::TextureCubeMap;
tcm->setTextureSize(textureSize, textureSize);

osg::ref_ptr<osg::Uniform> roughnessUniform = new osg::Uniform("roughness", roughness);
stateset->addUniform(roughnessUniform);
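If you would rather compute all five levels in one run instead of editing theMip by hand, a loop like the following produces the per-level roughness and texture size (a sketch of the arithmetic only; hooking each level up to its own render pass stays as in the original code):

#include <cmath>
#include <cstdio>

int main()
{
    const float maxMipMapLevel = 4.0f;
    const int baseSize = 128; // size of mip level 0, as in the article

    for (int mip = 0; mip <= static_cast<int>(maxMipMapLevel); ++mip)
    {
        // One roughness value per level, from 0.0 (mirror-like) to 1.0 (fully rough).
        float roughness = mip / maxMipMapLevel;
        // Each level halves the resolution: 128, 64, 32, 16, 8.
        int textureSize = static_cast<int>(baseSize * std::pow(0.5f, mip));
        std::printf("mip %d: roughness %.2f, %dx%d\n", mip, roughness, textureSize, textureSize);
    }
    return 0;
}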
    

The results for the different mipmap levels are as follows:
[Figures: rendered prefiltered cubemap at each of the five roughness/mip levels]

The complete code:
#include <osg/TextureCubeMap>
#include <osg/ShapeDrawable>
#include <osg/Geode>
#include <osg/Geometry>
#include <osg/Program>
#include <osg/Shader>
#include <osg/Uniform>
#include <osg/NodeVisitor>
#include <osgDB/ReadFile>
#include <osgViewer/Viewer>
#include <cmath>

static const char * vertexShader =
{
    //"#version 120 core\n"
    "in vec3 aPos;\n"
    "varying vec3 localPos;\n"
    "void main(void)\n"
    "{\n"
    "    localPos = aPos;\n"
    "    gl_Position = ftransform();\n"
    //"gl_Position = view * view * vec4(aPos,1.0);"
    "}\n"
};

static const char * psShader =
{
    "varying vec3 localPos;\n"
    "uniform samplerCube environmentMap;"
    "uniform float roughness;"
    "const float PI = 3.1415926;"
    "float VanDerCorpus(uint n, uint base) "
    "{ "
    "    float invBase = 1.0 / float(base); "
    "    float denom = 1.0; "
    "    float result = 0.0; "
    "    for (uint i = 0u; i < 32u; ++i) "
    "    { "
    "        if (n > 0u) "
    "        { "
    "            denom = mod(float(n), 2.0); "
    "            result += denom * invBase; "
    "            invBase = invBase / 2.0; "
    "            n = uint(float(n) / 2.0); "
    "        } "
    "    } "
    "    return result; "
    "} "
    " "
    "vec2 HammersleyNoBitOps(uint i, uint N) "
    "{ "
    "    return vec2(float(i) / float(N), VanDerCorpus(i, 2u)); "
    "} "
    //"float RadicalInverse_VdC(uint bits)\n"
    //"{"
    //"bits = (bits << 16u) | (bits >> 16u);"
    //"bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);"
    //"bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);"
    //"bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);"
    //"bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);"
    //"return float(bits) * 2.3283064365386963e-10;"
    //"}"
    //"vec2 Hammersley(uint i, uint N)"
    //"{"
    //"return vec2(float(i) / float(N), RadicalInverse_VdC(i));"
    //"}"
    "vec3 ImportanceSampleGGX(vec2 Xi, vec3 N, float roughness)"
    "{"
    "    float a = roughness * roughness;"
    "    float phi = 2.0 * PI * Xi.x;"
    "    float cosTheta = sqrt((1.0 - Xi.y) / (1.0 + (a * a - 1.0) * Xi.y));"
    "    float sinTheta = sqrt(1.0 - cosTheta * cosTheta);"
    "    vec3 H;"
    "    H.x = cos(phi) * sinTheta;"
    "    H.y = sin(phi) * sinTheta;"
    "    H.z = cosTheta;"
    "    vec3 up = abs(N.z) < 0.999 ? vec3(0.0,0.0,1.0) : vec3(1.0,0.0,0.0);"
    "    vec3 tangent = normalize(cross(up,N));"
    "    vec3 bitangent = cross(N,tangent);"
    "    vec3 sampleVec = tangent * H.x + bitangent * H.y + N * H.z;"
    "    return normalize(sampleVec);"
    "}"
    "void main() "
    "{ "
    "    vec3 N = normalize(localPos); "
    "    vec3 R = N; "
    "    vec3 V = R; "
    " "
    "    const uint SAMPLE_COUNT = 1024u; "
    "    float totalWeight = 0.0; "
    "    vec3 prefilteredColor = vec3(0.0); "
    "    for (uint i = 0u; i < SAMPLE_COUNT; ++i) "
    "    { "
    "        vec2 Xi = HammersleyNoBitOps(i, SAMPLE_COUNT); "
    "        vec3 H = ImportanceSampleGGX(Xi, N, roughness); "
    "        vec3 L = normalize(2.0 * dot(V, H) * H - V); "
    " "
    "        float NdotL = max(dot(N, L), 0.0); "
    "        if (NdotL > 0.0) "
    "        { "
    "            prefilteredColor += texture(environmentMap, L).rgb * NdotL; "
    "            totalWeight += NdotL; "
    "        } "
    "    } "
    "    prefilteredColor = prefilteredColor / totalWeight; "
    " "
    "    gl_FragColor = vec4(prefilteredColor, 1.0); "
    "} "
};
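Written out, the loop in the fragment shader's main() computes an NdotL-weighted average of the environment samples around the lobe:

\mathrm{prefilteredColor} = \frac{\sum_{k} L(l_k)\,(n \cdot l_k)}{\sum_{k} (n \cdot l_k)}

As in Epic's course notes, weighting the samples by NdotL and normalizing by totalWeight gives noticeably better results than a plain average.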
class MyNodeVisitor : public osg::NodeVisitor
{
public:
    MyNodeVisitor() : osg::NodeVisitor(osg::NodeVisitor::TRAVERSE_ALL_CHILDREN)
    {
    }
    void apply(osg::Geode& geode)
    {
        int count = geode.getNumDrawables();
        for (int i = 0; i < count; i++)
        {
            osg::ref_ptr<osg::Geometry> geometry = geode.getDrawable(i)->asGeometry();
            if (!geometry.valid())
            {
                continue;
            }
            // Bind the vertex array to attribute slot 1 so the shader's aPos
            // (bound via addBindAttribLocation("aPos", 1)) receives the positions.
            osg::Array* vertexArray = geometry->getVertexArray();
            geometry->setVertexAttribArray(1, vertexArray);
        }
        traverse(geode);
    }
    
    };

    int main()
    {

    osg::ref_ptr<osgViewer::Viewer> viewer = new osgViewer::Viewer;

    float minMipMapLevel = 0.0;
    float maxMipMapLevel = 4.0;

    float theMip = 4.0; // 0, 1, 2, 3, 4
    float roughness = theMip / maxMipMapLevel;

    int textureSize = static_cast<int>(128 * std::pow(0.5, theMip));

    osg::ref_ptr<osg::TextureCubeMap> tcm = new osg::TextureCubeMap;
    tcm->setTextureSize(textureSize, textureSize);
    tcm->setFilter(osg::Texture::MIN_FILTER, osg::Texture::LINEAR_MIPMAP_LINEAR);
    tcm->setFilter(osg::Texture::MAG_FILTER, osg::Texture::LINEAR);
    tcm->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
    tcm->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
    tcm->setWrap(osg::Texture::WRAP_R, osg::Texture::CLAMP_TO_EDGE);

    std::string strImagePosX = "D:/hdr/Right face camera.bmp";
    osg::ref_ptr<osg::Image> imagePosX = osgDB::readImageFile(strImagePosX);
    tcm->setImage(osg::TextureCubeMap::POSITIVE_X, imagePosX);
    std::string strImageNegX = "D:/hdr/Left face camera.bmp";
    osg::ref_ptr<osg::Image> imageNegX = osgDB::readImageFile(strImageNegX);
    tcm->setImage(osg::TextureCubeMap::NEGATIVE_X, imageNegX);

    std::string strImagePosY = "D:/hdr/Front face camera.bmp";
    osg::ref_ptr<osg::Image> imagePosY = osgDB::readImageFile(strImagePosY);
    tcm->setImage(osg::TextureCubeMap::POSITIVE_Y, imagePosY);
    std::string strImageNegY = "D:/hdr/Back face camera.bmp";
    osg::ref_ptr<osg::Image> imageNegY = osgDB::readImageFile(strImageNegY);
    tcm->setImage(osg::TextureCubeMap::NEGATIVE_Y, imageNegY);

    std::string strImagePosZ = "D:/hdr/Top face camera.bmp";
    osg::ref_ptr<osg::Image> imagePosZ = osgDB::readImageFile(strImagePosZ);
    tcm->setImage(osg::TextureCubeMap::POSITIVE_Z, imagePosZ);

    std::string strImageNegZ = "D:/hdr/Bottom face camera.bmp";
    osg::ref_ptr<osg::Image> imageNegZ = osgDB::readImageFile(strImageNegZ);
    tcm->setImage(osg::TextureCubeMap::NEGATIVE_Z, imageNegZ);
    tcm->setUseHardwareMipMapGeneration(true);
    tcm->setMinLOD(minMipMapLevel);
    tcm->setMaxLOD(maxMipMapLevel);

    osg::ref_ptr<osg::Box> box = new osg::Box(osg::Vec3(0, 0, 0), 1);
    osg::ref_ptr<osg::ShapeDrawable> drawable = new osg::ShapeDrawable(box);
    osg::ref_ptr<osg::Geode> geode = new osg::Geode;
    geode->addDrawable(drawable);
    MyNodeVisitor nv;
    geode->accept(nv);
    osg::ref_ptr<osg::StateSet> stateset = geode->getOrCreateStateSet();
    stateset->setTextureAttributeAndModes(0, tcm, osg::StateAttribute::OVERRIDE | osg::StateAttribute::ON);

    // shader
    osg::ref_ptr<osg::Shader> vs1 = new osg::Shader(osg::Shader::VERTEX, vertexShader);
    osg::ref_ptr<osg::Shader> ps1 = new osg::Shader(osg::Shader::FRAGMENT, psShader);
    osg::ref_ptr<osg::Program> program1 = new osg::Program;
    program1->addShader(vs1);
    program1->addShader(ps1);
    program1->addBindAttribLocation("aPos", 1);

    osg::ref_ptr<osg::Uniform> environmentMapUniform = new osg::Uniform("environmentMap", 0);
    stateset->addUniform(environmentMapUniform);
    osg::ref_ptr<osg::Uniform> roughnessUniform = new osg::Uniform("roughness", roughness);
    stateset->addUniform(roughnessUniform);

    stateset->setAttribute(program1, osg::StateAttribute::ON);

    viewer->setSceneData(geode);
    viewer->realize();
    return viewer->run();
    

    }
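The article keeps things simple by writing each roughness level to its own texture. For completeness, here is a rough sketch (my own outline under assumptions, not code from the article) of how the same passes could instead be routed into the mip levels of a single cubemap using OSG pre-render FBO cameras. The prefilterScene argument stands for the unit box with the shaders above, and the per-face view matrices are omitted:

#include <osg/Camera>
#include <osg/Group>
#include <osg/TextureCubeMap>
#include <osg/Uniform>

// Hypothetical sketch: attach one pre-render FBO camera per (mip level, face)
// so each roughness level lands in its own mip of a single cubemap.
osg::ref_ptr<osg::Group> buildPrefilterPasses(osg::Node* prefilterScene,
                                              osg::TextureCubeMap* prefilterTexture)
{
    osg::ref_ptr<osg::Group> root = new osg::Group;
    const int maxMip = 4;

    for (int mip = 0; mip <= maxMip; ++mip)
    {
        int size = 128 >> mip; // 128, 64, 32, 16, 8

        for (unsigned int face = 0; face < 6; ++face)
        {
            osg::ref_ptr<osg::Camera> camera = new osg::Camera;
            camera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
            camera->setRenderOrder(osg::Camera::PRE_RENDER);
            camera->setReferenceFrame(osg::Transform::ABSOLUTE_RF);
            camera->setViewport(0, 0, size, size);
            camera->setClearMask(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
            // 90-degree FOV per face; the per-face view matrix is not shown here.
            camera->setProjectionMatrixAsPerspective(90.0, 1.0, 0.1, 10.0);
            // Route this camera's color output into the given mip level and face.
            camera->attach(osg::Camera::COLOR_BUFFER, prefilterTexture, mip, face);

            // Override the roughness uniform for this mip level's pass.
            camera->getOrCreateStateSet()->addUniform(
                new osg::Uniform("roughness", float(mip) / float(maxMip)),
                osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);

            camera->addChild(prefilterScene);
            root->addChild(camera);
        }
    }
    return root;
}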

  • Original article: https://blog.csdn.net/directx3d_beginner/article/details/133362250