Display Framework: SurfaceFlinger GPU Composition

For layers that the device (HWC) cannot composite, SurfaceFlinger falls back to GPU composition; the GPU-composited result is then synchronized with the device-composited layers in the HWC and sent to the display together.
The EGL environment is already set up when SurfaceFlinger starts, as follows:
File: frameworks/native/services/surfaceflinger/SurfaceFlinger.cpp

void SurfaceFlinger::init() {
    ALOGI(  "SurfaceFlinger's main thread ready to run. "
            "Initializing graphics H/W...");
    Mutex::Autolock _l(mStateLock);

    // Get a RenderEngine for the given display / config (can't fail)
    // TODO(b/77156734): We need to stop casting and use HAL types when possible.
    // Sending maxFrameBufferAcquiredBuffers as the cache size is tightly tuned to single-display.
    // Create the RenderEngine object
    mCompositionEngine->setRenderEngine(renderengine::RenderEngine::create(
            renderengine::RenderEngineCreationArgs::Builder()
                .setPixelFormat(static_cast<int32_t>(defaultCompositionPixelFormat))
                .setImageCacheSize(maxFrameBufferAcquiredBuffers)
                .setUseColorManagerment(useColorManagement)
                .setEnableProtectedContext(enable_protected_contents(false))
                .setPrecacheToneMapperShaderOnly(false)
                .setSupportsBackgroundBlur(mSupportsBlur)
                .setContextPriority(useContextPriority
                        ? renderengine::RenderEngine::ContextPriority::HIGH
                        : renderengine::RenderEngine::ContextPriority::MEDIUM)
                .build()));

File: frameworks/native/libs/renderengine/RenderEngine.cpp

std::unique_ptr<impl::RenderEngine> RenderEngine::create(const RenderEngineCreationArgs& args) {
    char prop[PROPERTY_VALUE_MAX];
    // If PROPERTY_DEBUG_RENDERENGINE_BACKEND is not set, the backend defaults to "gles"
    property_get(PROPERTY_DEBUG_RENDERENGINE_BACKEND, prop, "gles");
    if (strcmp(prop, "gles") == 0) {
        ALOGD("RenderEngine GLES Backend");
        // Create the GLESRenderEngine object
        return renderengine::gl::GLESRenderEngine::create(args);
    }
    ALOGE("UNKNOWN BackendType: %s, create GLES RenderEngine.", prop);
    return renderengine::gl::GLESRenderEngine::create(args);
}

File: frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

std::unique_ptr<GLESRenderEngine> GLESRenderEngine::create(const RenderEngineCreationArgs& args) {
    // initialize EGL for the default display
    // Obtain the EGLDisplay
    EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    if (!eglInitialize(display, nullptr, nullptr)) {
        LOG_ALWAYS_FATAL("failed to initialize EGL");
    }

    // Query the EGL version
    const auto eglVersion = eglQueryStringImplementationANDROID(display, EGL_VERSION);
    if (!eglVersion) {
        checkGlError(__FUNCTION__, __LINE__);
        LOG_ALWAYS_FATAL("eglQueryStringImplementationANDROID(EGL_VERSION) failed");
    }
    // Query which extensions EGL supports
    const auto eglExtensions = eglQueryStringImplementationANDROID(display, EGL_EXTENSIONS);
    if (!eglExtensions) {
        checkGlError(__FUNCTION__, __LINE__);
        LOG_ALWAYS_FATAL("eglQueryStringImplementationANDROID(EGL_EXTENSIONS) failed");
    }
    // Record which extensions are supported; on current devices all of these end up true
    GLExtensions& extensions = GLExtensions::getInstance();
    extensions.initWithEGLStrings(eglVersion, eglExtensions);

    // The code assumes that ES2 or later is available if this extension is
    // supported.
    EGLConfig config = EGL_NO_CONFIG;
    if (!extensions.hasNoConfigContext()) {
        config = chooseEglConfig(display, args.pixelFormat, /*logConfig*/ true);
    }

    bool useContextPriority =
            extensions.hasContextPriority() && args.contextPriority == ContextPriority::HIGH;
    EGLContext protectedContext = EGL_NO_CONTEXT;
    if (args.enableProtectedContext && extensions.hasProtectedContent()) {
        protectedContext = createEglContext(display, config, nullptr, useContextPriority,
                                            Protection::PROTECTED);
        ALOGE_IF(protectedContext == EGL_NO_CONTEXT, "Can't create protected context");
    }

    // Create the non-protected EGLContext
    EGLContext ctxt = createEglContext(display, config, protectedContext, useContextPriority,
                                       Protection::UNPROTECTED);
    LOG_ALWAYS_FATAL_IF(ctxt == EGL_NO_CONTEXT, "EGLContext creation failed");

    EGLSurface dummy = EGL_NO_SURFACE;
    // Surfaceless contexts are supported, so this branch is normally skipped
    if (!extensions.hasSurfacelessContext()) {
        dummy = createDummyEglPbufferSurface(display, config, args.pixelFormat,
                                             Protection::UNPROTECTED);
        LOG_ALWAYS_FATAL_IF(dummy == EGL_NO_SURFACE, "can't create dummy pbuffer");
    }
    // eglMakeCurrent binds the EGLContext (and surfaces) to the current thread
    EGLBoolean success = eglMakeCurrent(display, dummy, dummy, ctxt);
    LOG_ALWAYS_FATAL_IF(!success, "can't make dummy pbuffer current");
    ...
    std::unique_ptr<GLESRenderEngine> engine;
    switch (version) {
        case GLES_VERSION_1_0:
        case GLES_VERSION_1_1:
            LOG_ALWAYS_FATAL("SurfaceFlinger requires OpenGL ES 2.0 minimum to run.");
            break;
        case GLES_VERSION_2_0:
        case GLES_VERSION_3_0:
            // Construct the GLESRenderEngine
            engine = std::make_unique<GLESRenderEngine>(args, display, config, ctxt, dummy,
                                                        protectedContext, protectedDummy);
            break;
    }
    ...
}

GLESRenderEngine::GLESRenderEngine(const RenderEngineCreationArgs& args, EGLDisplay display,
                                   EGLConfig config, EGLContext ctxt, EGLSurface dummy,
                                   EGLContext protectedContext, EGLSurface protectedDummy)
      : renderengine::impl::RenderEngine(args),
        mEGLDisplay(display),
        mEGLConfig(config),
        mEGLContext(ctxt),
        mDummySurface(dummy),
        mProtectedEGLContext(protectedContext),
        mProtectedDummySurface(protectedDummy),
        mVpWidth(0),
        mVpHeight(0),
        mFramebufferImageCacheSize(args.imageCacheSize),
        mUseColorManagement(args.useColorManagement) {
    // Query the maximum supported texture size and viewport dimensions
    glGetIntegerv(GL_MAX_TEXTURE_SIZE, &mMaxTextureSize);
    glGetIntegerv(GL_MAX_VIEWPORT_DIMS, mMaxViewportDims);
    // Pixel data is 4-byte aligned
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    ...
    // Color-space setup; analyzed later when a concrete scenario needs it
    if (mUseColorManagement) {
        const ColorSpace srgb(ColorSpace::sRGB());
        const ColorSpace displayP3(ColorSpace::DisplayP3());
        const ColorSpace bt2020(ColorSpace::BT2020());
        // no chromatic adaptation needed since all color spaces use D65 for their white points.
        mSrgbToXyz = mat4(srgb.getRGBtoXYZ());
        mDisplayP3ToXyz = mat4(displayP3.getRGBtoXYZ());
        mBt2020ToXyz = mat4(bt2020.getRGBtoXYZ());
        mXyzToSrgb = mat4(srgb.getXYZtoRGB());
        mXyzToDisplayP3 = mat4(displayP3.getXYZtoRGB());
        mXyzToBt2020 = mat4(bt2020.getXYZtoRGB());

        // Compute sRGB to Display P3 and BT2020 transform matrix.
        // NOTE: For now, we are limiting output wide color space support to
        // Display-P3 and BT2020 only.
        mSrgbToDisplayP3 = mXyzToDisplayP3 * mSrgbToXyz;
        mSrgbToBt2020 = mXyzToBt2020 * mSrgbToXyz;

        // Compute Display P3 to sRGB and BT2020 transform matrix.
        mDisplayP3ToSrgb = mXyzToSrgb * mDisplayP3ToXyz;
        mDisplayP3ToBt2020 = mXyzToBt2020 * mDisplayP3ToXyz;

        // Compute BT2020 to sRGB and Display P3 transform matrix
        mBt2020ToSrgb = mXyzToSrgb * mBt2020ToXyz;
        mBt2020ToDisplayP3 = mXyzToDisplayP3 * mBt2020ToXyz;
    }
    ...
    // Used for layers with background blur; analyzed later when relevant
    if (args.supportsBackgroundBlur) {
        mBlurFilter = new BlurFilter(*this);
        checkErrors("BlurFilter creation");
    }

    // Create the ImageManager thread, which manages the EGLImages of input buffers
    mImageManager = std::make_unique<ImageManager>(this);
    mImageManager->initThread();

    // Create the GLFramebuffer
    mDrawingBuffer = createFramebuffer();
    ...
}

File: frameworks/native/libs/renderengine/gl/GLFramebuffer.cpp

// Generates a texture ID (mTextureName) and a framebuffer ID (mFramebufferName)
GLFramebuffer::GLFramebuffer(GLESRenderEngine& engine)
      : mEngine(engine), mEGLDisplay(engine.getEGLDisplay()), mEGLImage(EGL_NO_IMAGE_KHR) {
    glGenTextures(1, &mTextureName);
    glGenFramebuffers(1, &mFramebufferName);
}

So the EGL environment is built at startup and the current thread is bound to the context, ready for the GL commands issued later. An ImageManager thread is then created to manage the EGLImages of the input buffers, and a GLFramebuffer is created to operate on the output buffer.
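For reference, the same EGL bring-up can be reproduced outside SurfaceFlinger: get a display, initialize, choose a config, create an ES2 context, and make it current. The sketch below is a minimal standalone version under the assumption that EGL_KHR_surfaceless_context is available; createOffscreenContext is a hypothetical helper, not AOSP code.

#include <EGL/egl.h>

// Minimal sketch of the EGL bring-up sequence that GLESRenderEngine::create() performs.
// createOffscreenContext() is an illustrative helper, not an AOSP function.
EGLContext createOffscreenContext(EGLDisplay* outDisplay) {
    EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    if (display == EGL_NO_DISPLAY || !eglInitialize(display, nullptr, nullptr)) {
        return EGL_NO_CONTEXT;                       // no usable EGL implementation
    }

    // Ask for any ES2-capable RGBA8888 config.
    const EGLint configAttribs[] = {
        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
        EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, EGL_ALPHA_SIZE, 8,
        EGL_NONE
    };
    EGLConfig config;
    EGLint numConfigs = 0;
    if (!eglChooseConfig(display, configAttribs, &config, 1, &numConfigs) || numConfigs < 1) {
        return EGL_NO_CONTEXT;
    }

    // Create an ES 2.0 context and make it current without a surface
    // (requires EGL_KHR_surfaceless_context, which SurfaceFlinger also relies on).
    const EGLint contextAttribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
    EGLContext context = eglCreateContext(display, config, EGL_NO_CONTEXT, contextAttribs);
    if (context == EGL_NO_CONTEXT ||
        !eglMakeCurrent(display, EGL_NO_SURFACE, EGL_NO_SURFACE, context)) {
        return EGL_NO_CONTEXT;
    }

    *outDisplay = display;
    return context;                                  // GL commands can now be issued on this thread
}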
Now for the input textures: when a BufferQueueLayer is created, a texture ID is already generated for each layer, in preparation for later GPU composition.
File: frameworks/native/services/surfaceflinger/SurfaceFlinger.cpp

status_t SurfaceFlinger::createBufferQueueLayer(const sp<Client>& client, std::string name,
                                                uint32_t w, uint32_t h, uint32_t flags,
                                                LayerMetadata metadata, PixelFormat& format,
                                                sp<IBinder>* handle,
                                                sp<IGraphicBufferProducer>* gbp,
                                                sp<Layer>* outLayer) {
    ...
    args.textureName = getNewTexture();
    ...
}

uint32_t SurfaceFlinger::getNewTexture() {
    {
        std::lock_guard lock(mTexturePoolMutex);
        if (!mTexturePool.empty()) {
            uint32_t name = mTexturePool.back();
            mTexturePool.pop_back();
            ATRACE_INT("TexturePoolSize", mTexturePool.size());
            return name;
        }

        // The pool was too small, so increase it for the future
        ++mTexturePoolSize;
    }

    // The pool was empty, so we need to get a new texture name directly using a
    // blocking call to the main thread
    // For each layer, glGenTextures generates a texture ID; schedule() runs on the SF main thread
    return schedule([this] {
               uint32_t name = 0;
               getRenderEngine().genTextures(1, &name);
               return name;
           })
            .get();
}
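The texture-pool idea above simply amortizes glGenTextures calls across layer creation. A minimal standalone sketch of the same idea (a hypothetical TexturePool class, not the SurfaceFlinger implementation) could look like this:

#include <GLES2/gl2.h>
#include <cstddef>
#include <vector>

// Sketch of a texture-name pool: pre-generate names on the GL thread, hand them out
// cheaply, and take them back when layers are destroyed. Illustrative only.
class TexturePool {
public:
    // Must be called on the thread that owns the GL context.
    void refill(size_t count) {
        std::vector<GLuint> names(count);
        glGenTextures(static_cast<GLsizei>(count), names.data());
        mFree.insert(mFree.end(), names.begin(), names.end());
    }

    GLuint acquire() {
        if (mFree.empty()) refill(8);   // grow on demand
        GLuint name = mFree.back();
        mFree.pop_back();
        return name;
    }

    void release(GLuint name) { mFree.push_back(name); }

private:
    std::vector<GLuint> mFree;
};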
Back in composeSurfaces, let's look at the GPU composition path.

File: frameworks/native/services/surfaceflinger/CompositionEngine/src/Output.cpp

std::optional<base::unique_fd> Output::composeSurfaces(
        const Region& debugRegion, const compositionengine::CompositionRefreshArgs& refreshArgs) {
    ...
    base::unique_fd fd;
    sp<GraphicBuffer> buf;

    // If we aren't doing client composition on this output, but do have a
    // flipClientTarget request for this frame on this output, we still need to
    // dequeue a buffer.
    if (hasClientComposition || outputState.flipClientTarget) {
        // Dequeue a buffer; this buffer is the composition output
        buf = mRenderSurface->dequeueBuffer(&fd);
        if (buf == nullptr) {
            ALOGW("Dequeuing buffer for display [%s] failed, bailing out of "
                  "client composition for this frame",
                  mName.c_str());
            return {};
        }
    }

    base::unique_fd readyFence;
    // Returns early here only when there is no GPU (client) composition
    if (!hasClientComposition) {
        setExpensiveRenderingExpected(false);
        return readyFence;
    }

    ALOGV("hasClientComposition");

    // Fill in clientCompositionDisplay, the display-related parameters
    renderengine::DisplaySettings clientCompositionDisplay;
    clientCompositionDisplay.physicalDisplay = outputState.destinationClip;
    clientCompositionDisplay.clip = outputState.sourceClip;
    clientCompositionDisplay.orientation = outputState.orientation;
    clientCompositionDisplay.outputDataspace = mDisplayColorProfile->hasWideColorGamut()
            ? outputState.dataspace
            : ui::Dataspace::UNKNOWN;
    clientCompositionDisplay.maxLuminance =
            mDisplayColorProfile->getHdrCapabilities().getDesiredMaxLuminance();

    // Compute the global color transform matrix.
    if (!outputState.usesDeviceComposition && !getSkipColorTransform()) {
        clientCompositionDisplay.colorTransform = outputState.colorTransformMatrix;
    }

    // Note: Updated by generateClientCompositionRequests
    clientCompositionDisplay.clearRegion = Region::INVALID_REGION;

    // Generate the client composition requests for the layers on this output.
    // Fill in clientCompositionLayers, the per-layer parameters
    std::vector<LayerFE::LayerSettings> clientCompositionLayers =
            generateClientCompositionRequests(supportsProtectedContent,
                                              clientCompositionDisplay.clearRegion,
                                              clientCompositionDisplay.outputDataspace);
    appendRegionFlashRequests(debugRegion, clientCompositionLayers);

    // Check if the client composition requests were rendered into the provided graphic buffer. If
    // so, we can reuse the buffer and avoid client composition.
    // If the cache already holds the same requests for this buffer, there is no need to draw again
    if (mClientCompositionRequestCache) {
        if (mClientCompositionRequestCache->exists(buf->getId(), clientCompositionDisplay,
                                                   clientCompositionLayers)) {
            outputCompositionState.reusedClientComposition = true;
            setExpensiveRenderingExpected(false);
            return readyFence;
        }
        mClientCompositionRequestCache->add(buf->getId(), clientCompositionDisplay,
                                            clientCompositionLayers);
    }

    // We boost GPU frequency here because there will be color spaces conversion
    // or complex GPU shaders and it's expensive. We boost the GPU frequency so that
    // GPU composition can finish in time. We must reset GPU frequency afterwards,
    // because high frequency consumes extra battery.
    // Boost the GPU frequency for scenes with blurred layers or expensive color-space conversion
    const bool expensiveBlurs =
            refreshArgs.blursAreExpensive && mLayerRequestingBackgroundBlur != nullptr;
    const bool expensiveRenderingExpected =
            clientCompositionDisplay.outputDataspace == ui::Dataspace::DISPLAY_P3 || expensiveBlurs;
    if (expensiveRenderingExpected) {
        setExpensiveRenderingExpected(true);
    }

    // Collect pointers to the entries of clientCompositionLayers; the content is the same
    std::vector<const renderengine::LayerSettings*> clientCompositionLayerPointers;
    clientCompositionLayerPointers.reserve(clientCompositionLayers.size());
    std::transform(clientCompositionLayers.begin(), clientCompositionLayers.end(),
                   std::back_inserter(clientCompositionLayerPointers),
                   [](LayerFE::LayerSettings& settings) -> renderengine::LayerSettings* {
                       return &settings;
                   });

    const nsecs_t renderEngineStart = systemTime();
    // GPU composition; the main logic lives in drawLayers
    status_t status =
            renderEngine.drawLayers(clientCompositionDisplay, clientCompositionLayerPointers,
                                    buf->getNativeBuffer(), /*useFramebufferCache=*/true,
                                    std::move(fd), &readyFence);
    ...
}

The input buffer is handed to RenderEngine through BufferLayer::prepareClientComposition.
File: frameworks/native/services/surfaceflinger/BufferLayer.cpp

std::optional<compositionengine::LayerFE::LayerSettings> BufferLayer::prepareClientComposition(
        compositionengine::LayerFE::ClientCompositionTargetSettings& targetSettings) {
    ATRACE_CALL();
    std::optional<compositionengine::LayerFE::LayerSettings> result =
            Layer::prepareClientComposition(targetSettings);
    ...
    const State& s(getDrawingState());
    // The buffer queued by the app
    layer.source.buffer.buffer = mBufferInfo.mBuffer;
    layer.source.buffer.isOpaque = isOpaque(s);
    // The acquire fence
    layer.source.buffer.fence = mBufferInfo.mFence;
    // The texture ID generated when the BufferQueueLayer was created
    layer.source.buffer.textureName = mTextureName;
    ...
}

At this point SurfaceFlinger has called into RenderEngine: the display and output-layer information has been passed down, which is everything GPU composition needs. Next, let's follow the drawLayers flow.
File: frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

status_t GLESRenderEngine::drawLayers(const DisplaySettings& display,
                                      const std::vector<const LayerSettings*>& layers,
                                      ANativeWindowBuffer* const buffer,
                                      const bool useFramebufferCache, base::unique_fd&& bufferFence,
                                      base::unique_fd* drawFence) {
    ATRACE_CALL();
    if (layers.empty()) {
        ALOGV("Drawing empty layer stack");
        return NO_ERROR;
    }

    // Wait for the previous frame's release fence
    if (bufferFence.get() >= 0) {
        // Duplicate the fence for passing to waitFence.
        base::unique_fd bufferFenceDup(dup(bufferFence.get()));
        if (bufferFenceDup < 0 || !waitFence(std::move(bufferFenceDup))) {
            ATRACE_NAME("Waiting before draw");
            sync_wait(bufferFence.get(), -1);
        }
    }

    if (buffer == nullptr) {
        ALOGE("No output buffer provided. Aborting GPU composition.");
        return BAD_VALUE;
    }

    std::unique_ptr<BindNativeBufferAsFramebuffer> fbo;
    ...
    if (blurLayersSize == 0) {
        // Bind the dequeued buffer to the framebuffer, i.e. use it as the FBO
        fbo = std::make_unique<BindNativeBufferAsFramebuffer>(*this, buffer, useFramebufferCache);

File: frameworks/native/libs/renderengine/gl/include/renderengine/RenderEngine.h

class BindNativeBufferAsFramebuffer {
public:
    BindNativeBufferAsFramebuffer(RenderEngine& engine, ANativeWindowBuffer* buffer,
                                  const bool useFramebufferCache)
          : mEngine(engine), mFramebuffer(mEngine.getFramebufferForDrawing()), mStatus(NO_ERROR) {
        mStatus = mFramebuffer->setNativeWindowBuffer(buffer, mEngine.isProtected(),
                                                      useFramebufferCache)
                ? mEngine.bindFrameBuffer(mFramebuffer)
                : NO_MEMORY;
    }
    ~BindNativeBufferAsFramebuffer() {
        mFramebuffer->setNativeWindowBuffer(nullptr, false, /*arbitrary*/ true);
        mEngine.unbindFrameBuffer(mFramebuffer);
    }
    status_t getStatus() const { return mStatus; }

private:
    RenderEngine& mEngine;
    Framebuffer* mFramebuffer;
    status_t mStatus;
};

File: frameworks/native/libs/renderengine/gl/GLFramebuffer.cpp

bool GLFramebuffer::setNativeWindowBuffer(ANativeWindowBuffer* nativeBuffer, bool isProtected,
                                          const bool useFramebufferCache) {
    ATRACE_CALL();
    if (mEGLImage != EGL_NO_IMAGE_KHR) {
        if (!usingFramebufferCache) {
            eglDestroyImageKHR(mEGLDisplay, mEGLImage);
            DEBUG_EGL_IMAGE_TRACKER_DESTROY();
        }
        mEGLImage = EGL_NO_IMAGE_KHR;
        mBufferWidth = 0;
        mBufferHeight = 0;
    }

    if (nativeBuffer) {
        mEGLImage = mEngine.createFramebufferImageIfNeeded(nativeBuffer, isProtected,
                                                           useFramebufferCache);
        if (mEGLImage == EGL_NO_IMAGE_KHR) {
            return false;
        }
        usingFramebufferCache = useFramebufferCache;
        mBufferWidth = nativeBuffer->width;
        mBufferHeight = nativeBuffer->height;
    }
    return true;
}

File: frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

EGLImageKHR GLESRenderEngine::createFramebufferImageIfNeeded(ANativeWindowBuffer* nativeBuffer,
                                                             bool isProtected,
                                                             bool useFramebufferCache) {
    // Type conversion: wrap the ANativeWindowBuffer as a GraphicBuffer
    sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(nativeBuffer);

    // Use the cache: if an image for the same buffer exists, return it directly
    if (useFramebufferCache) {
        std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
        for (const auto& image : mFramebufferImageCache) {
            if (image.first == graphicBuffer->getId()) {
                return image.second;
            }
        }
    }

    EGLint attributes[] = {
            isProtected ? EGL_PROTECTED_CONTENT_EXT : EGL_NONE,
            isProtected ? EGL_TRUE : EGL_NONE,
            EGL_NONE,
    };
    // Create an EGLImage from the dequeued buffer
    EGLImageKHR image = eglCreateImageKHR(mEGLDisplay, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
                                          nativeBuffer, attributes);
    if (useFramebufferCache) {
        if (image != EGL_NO_IMAGE_KHR) {
            std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
            if (mFramebufferImageCache.size() >= mFramebufferImageCacheSize) {
                EGLImageKHR expired = mFramebufferImageCache.front().second;
                mFramebufferImageCache.pop_front();
                eglDestroyImageKHR(mEGLDisplay, expired);
                DEBUG_EGL_IMAGE_TRACKER_DESTROY();
            }
            // Store the image in mFramebufferImageCache
            mFramebufferImageCache.push_back({graphicBuffer->getId(), image});
        }
    }
    if (image != EGL_NO_IMAGE_KHR) {
        DEBUG_EGL_IMAGE_TRACKER_CREATE();
    }
    return image;
}

status_t GLESRenderEngine::bindFrameBuffer(Framebuffer* framebuffer) {
    ATRACE_CALL();
    GLFramebuffer* glFramebuffer = static_cast<GLFramebuffer*>(framebuffer);
    // The EGLImage created in the previous step
    EGLImageKHR eglImage = glFramebuffer->getEGLImage();
    // The texture ID and framebuffer ID created when RenderEngine was constructed
    uint32_t textureName = glFramebuffer->getTextureName();
    uint32_t framebufferName = glFramebuffer->getFramebufferName();

    // Bind the texture and turn our EGLImage into a texture
    // Subsequent texture operations apply to this binding
    glBindTexture(GL_TEXTURE_2D, textureName);
    // Back the 2D texture with the EGLImage
    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, (GLeglImageOES)eglImage);

    // Bind the Framebuffer to render into
    glBindFramebuffer(GL_FRAMEBUFFER, framebufferName);
    // Attach the texture to the framebuffer so rendering lands in the output buffer
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureName, 0);

    uint32_t glStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    ALOGE_IF(glStatus != GL_FRAMEBUFFER_COMPLETE_OES, "glCheckFramebufferStatusOES error %d",
             glStatus);
    return glStatus == GL_FRAMEBUFFER_COMPLETE_OES ? NO_ERROR : BAD_VALUE;
}

In short, the dequeued buffer is first turned into an image with eglCreateImageKHR, then glEGLImageTargetTexture2DOES creates a 2D texture from that image, and glFramebufferTexture2D attaches the texture to the framebuffer. setViewportAndProjection then sets the viewport and the projection matrix.
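The same render-into-a-graphic-buffer pattern is available outside SurfaceFlinger through the public NDK, by wrapping an AHardwareBuffer in an EGLImage and attaching it to an FBO. A minimal sketch, assuming a current EGL context and the EGL_ANDROID_image_native_buffer / GL_OES_EGL_image extensions (bindHardwareBufferAsFbo is an illustrative helper):

#define EGL_EGLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <android/hardware_buffer.h>

// Sketch: render into an AHardwareBuffer by attaching it to an FBO through an EGLImage.
// Error handling trimmed; not the SurfaceFlinger code.
GLuint bindHardwareBufferAsFbo(EGLDisplay display, AHardwareBuffer* hwBuffer) {
    EGLClientBuffer clientBuffer = eglGetNativeClientBufferANDROID(hwBuffer);
    const EGLint attrs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE };
    EGLImageKHR image = eglCreateImageKHR(display, EGL_NO_CONTEXT,
                                          EGL_NATIVE_BUFFER_ANDROID, clientBuffer, attrs);

    GLuint texture = 0, fbo = 0;
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    // Back the texture's storage with the buffer wrapped by the EGLImage.
    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, static_cast<GLeglImageOES>(image));

    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    // All subsequent draws now land in the hardware buffer.
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
    return fbo;  // caller should check glCheckFramebufferStatus(GL_FRAMEBUFFER)
}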
File: frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

status_t GLESRenderEngine::drawLayers(const DisplaySettings& display,
                                      const std::vector<const LayerSettings*>& layers,
                                      ANativeWindowBuffer* const buffer,
                                      const bool useFramebufferCache, base::unique_fd&& bufferFence,
                                      base::unique_fd* drawFence) {
    ...
    // Set the vertex and texture-coordinate sizes
    Mesh mesh = Mesh::Builder()
                    .setPrimitive(Mesh::TRIANGLE_FAN)
                    .setVertices(4 /* count */, 2 /* size */)
                    .setTexCoords(2 /* size */)
                    .setCropCoords(2 /* size */)
                    .build();

    for (auto const layer : layers) {    // iterate over the output layers
        ...
        // Get the layer bounds
        const FloatRect bounds = layer->geometry.boundaries;
        Mesh::VertexArray<vec2> position(mesh.getPositionArray<vec2>());
        // Set the vertex coordinates, counter-clockwise
        position[0] = vec2(bounds.left, bounds.top);
        position[1] = vec2(bounds.left, bounds.bottom);
        position[2] = vec2(bounds.right, bounds.bottom);
        position[3] = vec2(bounds.right, bounds.top);

        // Set the crop coordinates
        setupLayerCropping(*layer, mesh);
        // Set the color transform matrix
        setColorTransform(display.colorTransform * layer->colorTransform);
        ...
        // Buffer-related setup
        if (layer->source.buffer.buffer != nullptr) {
            disableTexture = false;
            isOpaque = layer->source.buffer.isOpaque;

            // The layer's buffer, i.e. the input buffer
            sp<GraphicBuffer> gBuf = layer->source.buffer.buffer;
            // textureName was generated when the BufferQueueLayer was created and identifies this
            // layer; the fence is the acquire fence
            bindExternalTextureBuffer(layer->source.buffer.textureName, gBuf,
                                      layer->source.buffer.fence);
            ...
            // Set the texture coordinates, also counter-clockwise
            renderengine::Mesh::VertexArray<vec2> texCoords(mesh.getTexCoordArray<vec2>());
            texCoords[0] = vec2(0.0, 0.0);
            texCoords[1] = vec2(0.0, 1.0);
            texCoords[2] = vec2(1.0, 1.0);
            texCoords[3] = vec2(1.0, 0.0);

            // Set the texture parameters via glTexParameteri
            setupLayerTexturing(texture);
        }

status_t GLESRenderEngine::bindExternalTextureBuffer(uint32_t texName,
                                                     const sp<GraphicBuffer>& buffer,
                                                     const sp<Fence>& bufferFence) {
    if (buffer == nullptr) {
        return BAD_VALUE;
    }
    ATRACE_CALL();

    bool found = false;
    {
        // Check whether the ImageCache already holds an image for this buffer
        std::lock_guard<std::mutex> lock(mRenderingMutex);
        auto cachedImage = mImageCache.find(buffer->getId());
        found = (cachedImage != mImageCache.end());
    }

    // If we couldn't find the image in the cache at this time, then either
    // SurfaceFlinger messed up registering the buffer ahead of time or we got
    // backed up creating other EGLImages.
    if (!found) {
        // Not in the ImageCache, so a new EGLImage must be created; input EGLImages are
        // created on the ImageManager thread, which is woken up via notify
        status_t cacheResult = mImageManager->cache(buffer);
        if (cacheResult != NO_ERROR) {
            return cacheResult;
        }
    }
    ...
        // Turn the EGLImage into a texture of type GL_TEXTURE_EXTERNAL_OES
        bindExternalTextureImage(texName, *cachedImage->second);
        mTextureView.insert_or_assign(texName, buffer->getId());
    }
}

void GLESRenderEngine::bindExternalTextureImage(uint32_t texName, const Image& image) {
    ATRACE_CALL();
    const GLImage& glImage = static_cast<const GLImage&>(image);
    const GLenum target = GL_TEXTURE_EXTERNAL_OES;

    // Bind the texture with ID texName
    glBindTexture(target, texName);
    if (glImage.getEGLImage() != EGL_NO_IMAGE_KHR) {
        // Back the texture (ID texName) with the EGLImage
        glEGLImageTargetTexture2DOES(target, static_cast<GLeglImageOES>(glImage.getEGLImage()));
    }
}
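On the input side the pattern mirrors the output side, except the target is GL_TEXTURE_EXTERNAL_OES and the usual sampling parameters are set, as setupLayerTexturing does. A minimal sketch, assuming the EGLImage for the layer's buffer already exists (bindInputAsExternalTexture is an illustrative helper, not the AOSP function):

#define EGL_EGLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

// Sketch: bind an input buffer's EGLImage as an external texture and set sampling parameters.
void bindInputAsExternalTexture(GLuint texName, EGLImageKHR image) {
    glBindTexture(GL_TEXTURE_EXTERNAL_OES, texName);
    glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES,
                                 static_cast<GLeglImageOES>(image));
    // External textures only support linear/nearest filtering and clamp-to-edge wrapping.
    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}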
At this point both the input and output buffers are backed by textures, and the texture and vertex coordinates are set. The next step is to draw with shaders.

File: frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

void GLESRenderEngine::drawMesh(const Mesh& mesh) {
    ATRACE_CALL();
    if (mesh.getTexCoordsSize()) {
        // Enable the vertex attribute array so the vertex shader can read per-vertex data
        glEnableVertexAttribArray(Program::texCoords);
        // Pass the texture coordinates to the vertex shader
        glVertexAttribPointer(Program::texCoords, mesh.getTexCoordsSize(), GL_FLOAT, GL_FALSE,
                              mesh.getByteStride(), mesh.getTexCoords());
    }

    // Pass the vertex coordinates to the vertex shader
    glVertexAttribPointer(Program::position, mesh.getVertexSize(), GL_FLOAT, GL_FALSE,
                          mesh.getByteStride(), mesh.getPositions());
    ...
    // Build the vertex and fragment shaders and feed the vertex attributes and uniforms into them
    ProgramCache::getInstance().useProgram(mInProtectedContext ? mProtectedEGLContext : mEGLContext,
                                           managedState);
    ...
    // Ask the GPU to draw
    glDrawArrays(mesh.getPrimitive(), 0, mesh.getVertexCount());
    ...
}

File: frameworks/native/libs/renderengine/gl/ProgramCache.cpp

void ProgramCache::useProgram(EGLContext context, const Description& description) {
    // Compute the key; different keys produce different shaders
    Key needs(computeKey(description));

    // look-up the program in the cache
    auto& cache = mCaches[context];
    auto it = cache.find(needs);
    if (it == cache.end()) {
        // we didn't find our program, so generate one...
        nsecs_t time = systemTime();
        // No matching program in the cache, so generate a new one
        it = cache.emplace(needs, generateProgram(needs)).first;
        time = systemTime() - time;
        ALOGV(">>> generated new program for context %p: needs=%08X, time=%u ms (%zu programs)",
              context, needs.mKey, uint32_t(ns2ms(time)), cache.size());
    }

    // here we have a suitable program for this description
    std::unique_ptr<Program>& program = it->second;
    if (program->isValid()) {
        program->use();
        program->setUniforms(description);
    }
}

std::unique_ptr<Program> ProgramCache::generateProgram(const Key& needs) {
    ATRACE_CALL();
    // Generate the vertex shader source
    String8 vs = generateVertexShader(needs);
    // Generate the fragment shader source
    String8 fs = generateFragmentShader(needs);
    // Compile and link the shaders
    return std::make_unique<Program>(needs, vs.string(), fs.string());
}

String8 ProgramCache::generateVertexShader(const Key& needs) {
    Formatter vs;
    if (needs.hasTextureCoords()) {
        // attribute values come from glVertexAttribPointer; varying values go to the fragment shader
        vs << "attribute vec4 texCoords;"
           << "varying vec2 outTexCoords;";
    }
    ...
    vs << "attribute vec4 position;"
       << "uniform mat4 projection;"
       << "uniform mat4 texture;"
       << "void main(void) {" << indent << "gl_Position = projection * position;";
    if (needs.hasTextureCoords()) {
        vs << "outTexCoords = (texture * texCoords).st;";
    }
    ...
    return vs.getString();
}

String8 ProgramCache::generateFragmentShader(const Key& needs) {
    Formatter fs;
    if (needs.getTextureTarget() == Key::TEXTURE_EXT) {
        fs << "#extension GL_OES_EGL_image_external : require";
    }

    // default precision is required-ish in fragment shaders
    fs << "precision mediump float;";
    if (needs.getTextureTarget() == Key::TEXTURE_EXT) {
        fs << "uniform samplerExternalOES sampler;";
    } else if (needs.getTextureTarget() == Key::TEXTURE_2D) {
        fs << "uniform sampler2D sampler;";
    }
    if (needs.hasTextureCoords()) {
        fs << "varying vec2 outTexCoords;";
    }
    ...
    fs << "void main(void) {" << indent;
    ...
    if (needs.isTexturing()) {
        // Output the pixel color
        fs << "gl_FragColor = texture2D(sampler, outTexCoords);"
    ...
}

File: frameworks/native/libs/renderengine/gl/Program.cpp

Program::Program(const ProgramCache::Key& /*needs*/, const char* vertex, const char* fragment)
      : mInitialized(false) {
    // Compile the vertex and fragment shaders
    GLuint vertexId = buildShader(vertex, GL_VERTEX_SHADER);
    GLuint fragmentId = buildShader(fragment, GL_FRAGMENT_SHADER);
    // Create the program ID
    GLuint programId = glCreateProgram();
    // Attach the vertex and fragment shaders to the program
    glAttachShader(programId, vertexId);
    glAttachShader(programId, fragmentId);
    // Bind the shader attributes to our own attribute locations
    glBindAttribLocation(programId, position, "position");
    glBindAttribLocation(programId, texCoords, "texCoords");
    glBindAttribLocation(programId, cropCoords, "cropCoords");
    glBindAttribLocation(programId, shadowColor, "shadowColor");
    glBindAttribLocation(programId, shadowParams, "shadowParams");
    glLinkProgram(programId);

    GLint status;
    glGetProgramiv(programId, GL_LINK_STATUS, &status);
    ...
        mProgram = programId;
        mVertexShader = vertexId;
        mFragmentShader = fragmentId;
        mInitialized = true;
        // Look up the locations of the uniform variables in the shaders
        mProjectionMatrixLoc = glGetUniformLocation(programId, "projection");
        mTextureMatrixLoc = glGetUniformLocation(programId, "texture");
        ...
        // set-up the default values for our uniforms
        glUseProgram(programId);
        glUniformMatrix4fv(mProjectionMatrixLoc, 1, GL_FALSE, mat4().asArray());
        glEnableVertexAttribArray(0);
}

void Program::use() {
    // Make the program current
    glUseProgram(mProgram);
}

void Program::setUniforms(const Description& desc) {
    // TODO: we should have a mechanism here to not always reset uniforms that
    // didn't change for this program.
    // Upload the uniform values to the shader at the locations found above
    if (mSamplerLoc >= 0) {
        glUniform1i(mSamplerLoc, 0);
        glUniformMatrix4fv(mTextureMatrixLoc, 1, GL_FALSE, desc.texture.getMatrix().asArray());
    }
    ...
    glUniformMatrix4fv(mProjectionMatrixLoc, 1, GL_FALSE, desc.projectionMatrix.asArray());
}

Finally glDrawArrays is called and the GPU does the drawing. From the GPU's point of view, the inputs are just textures; the shaders determine each output pixel's position and color.
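Piecing together the strings emitted above, the shader pair for the common textured, external-buffer case looks roughly like the reconstruction below, together with the compile/link steps Program performs. This is an illustrative sketch under that assumption, not a verbatim dump of the generated source:

#include <GLES2/gl2.h>

// Readable reconstruction of the generated shader pair plus the compile/link sequence.
static const char* kVertexShader = R"(
    attribute vec4 position;
    attribute vec4 texCoords;
    uniform mat4 projection;
    uniform mat4 texture;
    varying vec2 outTexCoords;
    void main(void) {
        gl_Position = projection * position;
        outTexCoords = (texture * texCoords).st;
    })";

static const char* kFragmentShader = R"(
    #extension GL_OES_EGL_image_external : require
    precision mediump float;
    uniform samplerExternalOES sampler;
    varying vec2 outTexCoords;
    void main(void) {
        gl_FragColor = texture2D(sampler, outTexCoords);
    })";

GLuint buildProgram() {
    auto compile = [](GLenum type, const char* src) {
        GLuint shader = glCreateShader(type);
        glShaderSource(shader, 1, &src, nullptr);
        glCompileShader(shader);
        return shader;   // a real implementation would check GL_COMPILE_STATUS
    };
    GLuint program = glCreateProgram();
    glAttachShader(program, compile(GL_VERTEX_SHADER, kVertexShader));
    glAttachShader(program, compile(GL_FRAGMENT_SHADER, kFragmentShader));
    // Bind attribute locations before linking so they match the vertex arrays set up by drawMesh.
    glBindAttribLocation(program, 0, "position");
    glBindAttribLocation(program, 1, "texCoords");
    glLinkProgram(program);
    return program;      // check GL_LINK_STATUS before use
}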
GPU drawing is usually accompanied by an acquire fence, so let's look at how this fence is generated.
File: frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

base::unique_fd GLESRenderEngine::flush() {
    ATRACE_CALL();
    if (!GLExtensions::getInstance().hasNativeFenceSync()) {
        return base::unique_fd();
    }

    // Create an EGLSync object that signals when the GPU has finished drawing
    EGLSyncKHR sync = eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
    if (sync == EGL_NO_SYNC_KHR) {
        ALOGW("failed to create EGL native fence sync: %#x", eglGetError());
        return base::unique_fd();
    }

    // native fence fd will not be populated until flush() is done.
    // Flush all pending GL commands to the GPU
    glFlush();

    // get the fence fd
    // Obtain the fence fd used by Android's sync framework
    base::unique_fd fenceFd(eglDupNativeFenceFDANDROID(mEGLDisplay, sync));
    eglDestroySyncKHR(mEGLDisplay, sync);
    if (fenceFd == EGL_NO_NATIVE_FENCE_FD_ANDROID) {
        ALOGW("failed to dup EGL native fence sync: %#x", eglGetError());
    }

    // Only trace if we have a valid fence, as current usage falls back to
    // calling finish() if the fence fd is invalid.
    if (CC_UNLIKELY(mTraceGpuCompletion && mFlushTracer) && fenceFd.get() >= 0) {
        mFlushTracer->queueSync(eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_FENCE_KHR, nullptr));
    }
    return fenceFd;
}

At this point the CPU has handed all commands to the GPU; the GPU draws on its own while the CPU continues.
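The consumer of that fence fd does not have to block the CPU: the fd can be re-imported as an EGLSync so that the wait happens in the GPU command stream, which appears to be what the waitFence call seen earlier in drawLayers does. A minimal sketch, assuming the EGL_ANDROID_native_fence_sync and EGL_KHR_wait_sync extensions (gpuWaitOnFence is an illustrative helper):

#define EGL_EGLEXT_PROTOTYPES
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <unistd.h>

// Sketch: import a native fence fd as an EGLSync and make the GPU wait for it
// before executing subsequent commands, without blocking the CPU.
bool gpuWaitOnFence(EGLDisplay display, int fenceFd) {
    if (fenceFd < 0) {
        return true;                         // no fence means the buffer is already safe to use
    }
    const EGLint attribs[] = { EGL_SYNC_NATIVE_FENCE_FD_ANDROID, fenceFd, EGL_NONE };
    // The sync object takes ownership of fenceFd on success.
    EGLSyncKHR sync = eglCreateSyncKHR(display, EGL_SYNC_NATIVE_FENCE_ANDROID, attribs);
    if (sync == EGL_NO_SYNC_KHR) {
        close(fenceFd);
        return false;
    }
    // Queue a wait in the GL command stream; the CPU returns immediately.
    eglWaitSyncKHR(display, sync, 0);
    eglDestroySyncKHR(display, sync);
    return true;
}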
Back in finishFrame: once the fence for the GPU composition is obtained, a queueBuffer is performed.
File: frameworks/native/services/surfaceflinger/CompositionEngine/src/Output.cpp

void Output::finishFrame(const compositionengine::CompositionRefreshArgs& refreshArgs) {
    ATRACE_CALL();
    ALOGV(__FUNCTION__);

    if (!getState().isEnabled) {
        return;
    }

    // Repaint the framebuffer (if needed), getting the optional fence for when
    // the composition completes.
    auto optReadyFence = composeSurfaces(Region::INVALID_REGION, refreshArgs);
    if (!optReadyFence) {
        return;
    }

    // swap buffers (presentation)
    mRenderSurface->queueBuffer(std::move(*optReadyFence));
}

File: frameworks/native/services/surfaceflinger/CompositionEngine/src/RenderSurface.cpp

void RenderSurface::queueBuffer(base::unique_fd readyFence) {
    auto& state = mDisplay.getState();
    ...
        if (mGraphicBuffer == nullptr) {
            ALOGE("No buffer is ready for display [%s]", mDisplay.getName().c_str());
        } else {
            status_t result =
                    // mGraphicBuffer->getNativeBuffer() is the GPU output buffer, i.e. the buffer
                    // the GPU composited the layers into
                    mNativeWindow->queueBuffer(mNativeWindow.get(),
                                               mGraphicBuffer->getNativeBuffer(), dup(readyFence));
            if (result != NO_ERROR) {
                ALOGE("Error when queueing buffer for display [%s]: %d", mDisplay.getName().c_str(),
                      result);
                // We risk blocking on dequeueBuffer if the primary display failed
                // to queue up its buffer, so crash here.
                if (!mDisplay.isVirtual()) {
                    LOG_ALWAYS_FATAL("ANativeWindow::queueBuffer failed with error: %d", result);
                } else {
                    mNativeWindow->cancelBuffer(mNativeWindow.get(),
                                                mGraphicBuffer->getNativeBuffer(), dup(readyFence));
                }
            }
            mGraphicBuffer = nullptr;
        }
    }

    // Consume the buffer
    status_t result = mDisplaySurface->advanceFrame();
    if (result != NO_ERROR) {
        ALOGE("[%s] failed pushing new frame to HWC: %d", mDisplay.getName().c_str(), result);
    }
}

File: frameworks/native/services/surfaceflinger/DisplayHardware/FramebufferSurface.cpp

status_t FramebufferSurface::advanceFrame() {
    uint32_t slot = 0;
    sp<GraphicBuffer> buf;
    sp<Fence> acquireFence(Fence::NO_FENCE);
    Dataspace dataspace = Dataspace::UNKNOWN;

    // Consume this buffer
    status_t result = nextBuffer(slot, buf, acquireFence, dataspace);
    mDataSpace = dataspace;
    if (result != NO_ERROR) {
        ALOGE("error latching next FramebufferSurface buffer: %s (%d)",
                strerror(-result), result);
    }
    return result;
}

status_t FramebufferSurface::nextBuffer(uint32_t& outSlot,
        sp<GraphicBuffer>& outBuffer, sp<Fence>& outFence,
        Dataspace& outDataspace) {
    Mutex::Autolock lock(mMutex);
    BufferItem item;

    // Acquire the buffer
    status_t err = acquireBufferLocked(&item, 0);
    ...
    if (mCurrentBufferSlot != BufferQueue::INVALID_BUFFER_SLOT &&
        item.mSlot != mCurrentBufferSlot) {
        mHasPendingRelease = true;
        mPreviousBufferSlot = mCurrentBufferSlot;
        mPreviousBuffer = mCurrentBuffer;
    }

    // Update the current buffer and fence information
    mCurrentBufferSlot = item.mSlot;
    mCurrentBuffer = mSlots[mCurrentBufferSlot].mGraphicBuffer;
    mCurrentFence = item.mFence;

    outFence = item.mFence;
    mHwcBufferCache.getHwcBuffer(mCurrentBufferSlot, mCurrentBuffer, &outSlot, &outBuffer);
    outDataspace = static_cast<Dataspace>(item.mDataSpace);

    // Hand the GPU output buffer and its fence to the HWC
    status_t result = mHwc.setClientTarget(mDisplayId, outSlot, outFence, outBuffer, outDataspace);
    if (result != NO_ERROR) {
        ALOGE("error posting framebuffer: %d", result);
        return result;
    }
    return NO_ERROR;
}

The GPU-composited buffer is handed to the HWC via setClientTarget. A display with GPU-composited layers must be validated before it is presented, so another present call is still required; that logic lives in postFramebuffer.
File: frameworks/native/services/surfaceflinger/CompositionEngine/src/Output.cpp

void Output::postFramebuffer() {
    ATRACE_CALL();
    ALOGV(__FUNCTION__);
    ...
    auto frame = presentAndGetFrameFences();

    mRenderSurface->onPresentDisplayCompleted();
    ...
}

File: frameworks/native/services/surfaceflinger/DisplayHardware/HWComposer.cpp

status_t HWComposer::presentAndGetReleaseFences(DisplayId displayId) {
    ATRACE_CALL();

    RETURN_IF_INVALID_DISPLAY(displayId, BAD_INDEX);

    auto& displayData = mDisplayData[displayId];
    auto& hwcDisplay = displayData.hwcDisplay;
    ...
    // Present the frame (including the GPU-composited output) and get the present fence
    auto error = hwcDisplay->present(&displayData.lastPresentFence);
    RETURN_IF_HWC_ERROR_FOR("present", error, displayId, UNKNOWN_ERROR);

    std::unordered_map<HWC2::Layer*, sp<Fence>> releaseFences;
    // Get the release fences from the HWC
    error = hwcDisplay->getReleaseFences(&releaseFences);
    RETURN_IF_HWC_ERROR_FOR("getReleaseFences", error, displayId, UNKNOWN_ERROR);

    displayData.releaseFences = std::move(releaseFences);

    return NO_ERROR;
}

File: frameworks/native/services/surfaceflinger/DisplayHardware/FramebufferSurface.cpp

void FramebufferSurface::onFrameCommitted() {
    if (mHasPendingRelease) {
        sp<Fence> fence = mHwc.getPresentFence(mDisplayId);
        if (fence->isValid()) {
            // Update the release fence of the previous BufferSlot
            status_t result = addReleaseFence(mPreviousBufferSlot,
                    mPreviousBuffer, fence);
            ALOGE_IF(result != NO_ERROR, "onFrameCommitted: failed to add the"
                    " fence: %s (%d)", strerror(-result), result);
        }
        // Release the previous buffer
        status_t result = releaseBufferLocked(mPreviousBufferSlot, mPreviousBuffer);
        ALOGE_IF(result != NO_ERROR, "onFrameCommitted: error releasing buffer:"
                " %s (%d)", strerror(-result), result);

        mPreviousBuffer.clear();
        mHasPendingRelease = false;
    }
}

At this point the GPU-composited layers reach the HWC through present, and the HWC commits them to the screen. The remaining fence-synchronization details are left for a later analysis.