Quellcodebibliothek Statistik Leitseite products/Sources/formale Sprachen/C/Firefox/gfx/webrender_bindings/   (Browser von der Mozilla Stiftung Version 136.0.1©)  Datei vom 10.2.2025 mit Größe 82 kB image not shown  

Quelle  DCLayerTree.cpp   Sprache: C++

 
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */


#include "DCLayerTree.h"

// -

#include <d3d11.h>
#include <dcomp.h>
#include <d3d11_1.h>
#include <dxgi1_2.h>

// -

#include "gfxWindowsPlatform.h"
#include "GLContext.h"
#include "GLContextEGL.h"
#include "mozilla/gfx/DeviceManagerDx.h"
#include "mozilla/gfx/Logging.h"
#include "mozilla/gfx/gfxVars.h"
#include "mozilla/gfx/GPUParent.h"
#include "mozilla/gfx/Matrix.h"
#include "mozilla/layers/HelpersD3D11.h"
#include "mozilla/StaticPrefs_gfx.h"
#include "mozilla/StaticPtr.h"
#include "mozilla/webrender/RenderD3D11TextureHost.h"
#include "mozilla/webrender/RenderDcompSurfaceTextureHost.h"
#include "mozilla/webrender/RenderTextureHost.h"
#include "mozilla/webrender/RenderThread.h"
#include "mozilla/WindowsVersion.h"
#include "mozilla/Telemetry.h"
#include "nsPrintfCString.h"
#include "WinUtils.h"

// -

namespace mozilla {
namespace wr {

extern LazyLogModule gRenderThreadLog;
#define LOG(...) MOZ_LOG(gRenderThreadLog, LogLevel::Debug, (__VA_ARGS__))

#define LOG_H(msg, ...)                   \
  MOZ_LOG(gDcompSurface, LogLevel::Debug, \
          ("DCSurfaceHandle=%p, " msg, this##__VA_ARGS__))

// Returns the PCI vendor id (e.g. 0x10DE for NVIDIA) of the DXGI adapter
// backing |aVideoDevice|.
// NOTE(review): the QueryInterface/GetAdapter/GetDesc results are not
// checked; this assumes the video device always exposes IDXGIDevice and a
// valid adapter — confirm callers only pass devices created through DXGI.
static UINT GetVendorId(ID3D11VideoDevice* const aVideoDevice) {
  RefPtr<IDXGIDevice> dxgiDevice;
  RefPtr<IDXGIAdapter> adapter;
  aVideoDevice->QueryInterface((IDXGIDevice**)getter_AddRefs(dxgiDevice));
  dxgiDevice->GetAdapter(getter_AddRefs(adapter));

  DXGI_ADAPTER_DESC adapterDesc;
  adapter->GetDesc(&adapterDesc);

  return adapterDesc.VendorId;
}

// Undocumented NVIDIA VSR data
// Undocumented NVIDIA VSR data.
// Bit layout of the status word returned by the driver's private
// VideoProcessorGetStreamExtension query (see
// GetNvidiaVpSuperResolutionInfo). Field order/widths must match the driver
// ABI exactly; do not reorder or repack.
struct NvidiaVSRGetData_v1 {
  UINT vsrGPUisVSRCapable : 1;   // 01/32, 1: GPU is VSR capable
  UINT vsrOtherFieldsValid : 1;  // 02/32, 1: Other status fields are valid
  // remaining fields are valid if vsrOtherFieldsValid is set - requires
  // previous execution of VPBlt with SetStreamExtension for VSR enabled.
  UINT vsrEnabled : 1;           // 03/32, 1: VSR is enabled
  UINT vsrIsInUseForThisVP : 1;  // 04/32, 1: VSR is in use by this Video
                                 // Processor
  UINT vsrLevel : 3;             // 05-07/32, 0-4 current level
  UINT vsrReserved : 21;         // 32-07
};

static Result<NvidiaVSRGetData_v1, HRESULT> GetNvidiaVpSuperResolutionInfo(
    ID3D11VideoContext* aVideoContext, ID3D11VideoProcessor* aVideoProcessor) {
  MOZ_ASSERT(aVideoContext);
  MOZ_ASSERT(aVideoProcessor);

  // Undocumented NVIDIA driver constants
  constexpr GUID nvGUID = {0xD43CE1B3,
                           0x1F4B,
                           0x48AC,
                           {0xBA, 0xEE, 0xC3, 0xC2, 0x53, 0x75, 0xE6, 0xF7}};

  NvidiaVSRGetData_v1 data{};
  HRESULT hr = aVideoContext->VideoProcessorGetStreamExtension(
      aVideoProcessor, 0, &nvGUID, sizeof(data), &data);

  if (FAILED(hr)) {
    return Err(hr);
  }
  return data;
}

static void AddProfileMarkerForNvidiaVpSuperResolutionInfo(
    ID3D11VideoContext* aVideoContext, ID3D11VideoProcessor* aVideoProcessor) {
  MOZ_ASSERT(profiler_thread_is_being_profiled_for_markers());

  auto res = GetNvidiaVpSuperResolutionInfo(aVideoContext, aVideoProcessor);
  if (res.isErr()) {
    return;
  }

  auto data = res.unwrap();

  nsPrintfCString str(
      "SuperResolution VP Capable %u OtherFieldsValid %u Enabled %u InUse %u "
      "Level %u",
      data.vsrGPUisVSRCapable, data.vsrOtherFieldsValid, data.vsrEnabled,
      data.vsrIsInUseForThisVP, data.vsrLevel);
  PROFILER_MARKER_TEXT("DCSurfaceVideo", GRAPHICS, {}, str);
}

static HRESULT SetNvidiaVpSuperResolution(ID3D11VideoContext* aVideoContext,
                                          ID3D11VideoProcessor* aVideoProcessor,
                                          bool aEnable) {
  LOG("SetNvidiaVpSuperResolution() aEnable=%d", aEnable);

  // Undocumented NVIDIA driver constants
  constexpr GUID nvGUID = {0xD43CE1B3,
                           0x1F4B,
                           0x48AC,
                           {0xBA, 0xEE, 0xC3, 0xC2, 0x53, 0x75, 0xE6, 0xF7}};

  constexpr UINT nvExtensionVersion = 0x1;
  constexpr UINT nvExtensionMethodSuperResolution = 0x2;
  struct {
    UINT version;
    UINT method;
    UINT enable;
  } streamExtensionInfo = {nvExtensionVersion, nvExtensionMethodSuperResolution,
                           aEnable ? 1u : 0};

  HRESULT hr;
  hr = aVideoContext->VideoProcessorSetStreamExtension(
      aVideoProcessor, 0, &nvGUID, sizeof(streamExtensionInfo),
      &streamExtensionInfo);
  return hr;
}

static HRESULT SetVpSuperResolution(UINT aGpuVendorId,
                                    ID3D11VideoContext* aVideoContext,
                                    ID3D11VideoProcessor* aVideoProcessor,
                                    bool aEnable) {
  MOZ_ASSERT(aVideoContext);
  MOZ_ASSERT(aVideoProcessor);

  if (aGpuVendorId == 0x10DE) {
    return SetNvidiaVpSuperResolution(aVideoContext, aVideoProcessor, aEnable);
  }
  return E_NOTIMPL;
}

static bool GetNvidiaRTXVideoTrueHDRSupported(
    ID3D11VideoContext* aVideoContext, ID3D11VideoProcessor* aVideoProcessor) {
  const GUID kNvidiaTrueHDRInterfaceGUID = {
      0xfdd62bb4,
      0x620b,
      0x4fd7,
      {0x9a, 0xb3, 0x1e, 0x59, 0xd0, 0xd5, 0x44, 0xb3}};
  UINT available = 0;
  HRESULT hr = aVideoContext->VideoProcessorGetStreamExtension(
      aVideoProcessor, 0, &kNvidiaTrueHDRInterfaceGUID, sizeof(available),
      &available);
  if (FAILED(hr)) {
    return false;
  }

  bool driverSupportsTrueHdr = (available == 1);
  return driverSupportsTrueHdr;
}

static HRESULT SetNvidiaRTXVideoTrueHDR(ID3D11VideoContext* aVideoContext,
                                        ID3D11VideoProcessor* aVideoProcessor,
                                        bool aEnable) {
  constexpr GUID kNvidiaTrueHDRInterfaceGUID = {
      0xfdd62bb4,
      0x620b,
      0x4fd7,
      {0x9a, 0xb3, 0x1e, 0x59, 0xd0, 0xd5, 0x44, 0xb3}};
  constexpr UINT kStreamExtensionMethodTrueHDR = 0x3;
  const UINT TrueHDRVersion4 = 4;
  struct {
    UINT version;
    UINT method;
    UINT enable : 1;
    UINT reserved : 31;
  } streamExtensionInfo = {TrueHDRVersion4, kStreamExtensionMethodTrueHDR,
                           aEnable ? 1u : 0u, 0u};
  HRESULT hr = aVideoContext->VideoProcessorSetStreamExtension(
      aVideoProcessor, 0, &kNvidiaTrueHDRInterfaceGUID,
      sizeof(streamExtensionInfo), &streamExtensionInfo);
  return hr;
}

static bool GetVpAutoHDRSupported(UINT aGpuVendorId,
                                  ID3D11VideoContext* aVideoContext,
                                  ID3D11VideoProcessor* aVideoProcessor) {
  MOZ_ASSERT(aVideoContext);
  MOZ_ASSERT(aVideoProcessor);

  if (aGpuVendorId == 0x10DE) {
    return GetNvidiaRTXVideoTrueHDRSupported(aVideoContext, aVideoProcessor);
  }
  return false;
}

static HRESULT SetVpAutoHDR(UINT aGpuVendorId,
                            ID3D11VideoContext* aVideoContext,
                            ID3D11VideoProcessor* aVideoProcessor,
                            bool aEnable) {
  MOZ_ASSERT(aVideoContext);
  MOZ_ASSERT(aVideoProcessor);

  if (aGpuVendorId == 0x10DE) {
    return SetNvidiaRTXVideoTrueHDR(aVideoContext, aVideoProcessor, aEnable);
  }
  MOZ_ASSERT_UNREACHABLE("Unexpected to be called");
  return E_NOTIMPL;
}

StaticAutoPtr<GpuOverlayInfo> DCLayerTree::sGpuOverlayInfo;

/* static */
UniquePtr<DCLayerTree> DCLayerTree::Create(gl::GLContext* aGL,
                                           EGLConfig aEGLConfig,
                                           ID3D11Device* aDevice,
                                           ID3D11DeviceContext* aCtx,
                                           HWND aHwnd, nsACString& aError) {
  RefPtr<IDCompositionDevice2> dCompDevice =
      gfx::DeviceManagerDx::Get()->GetDirectCompositionDevice();
  if (!dCompDevice) {
    aError.Assign("DCLayerTree(no device)"_ns);
    return nullptr;
  }

  auto layerTree = MakeUnique<DCLayerTree>(aGL, aEGLConfig, aDevice, aCtx,
                                           aHwnd, dCompDevice);
  if (!layerTree->Initialize(aHwnd, aError)) {
    return nullptr;
  }

  return layerTree;
}

void DCLayerTree::Shutdown() { DCLayerTree::sGpuOverlayInfo = nullptr; }

// Stores the GL/D3D/DComp references and default-initializes debug and
// drawing state. Heavy setup (targets, visuals) happens in Initialize().
DCLayerTree::DCLayerTree(gl::GLContext* aGL, EGLConfig aEGLConfig,
                         ID3D11Device* aDevice, ID3D11DeviceContext* aCtx,
                         HWND aHwnd, IDCompositionDevice2* aCompositionDevice)
    : mGL(aGL),
      mEGLConfig(aEGLConfig),
      mDevice(aDevice),
      mCtx(aCtx),
      mHwnd(aHwnd),
      mCompositionDevice(aCompositionDevice),
      mDebugCounter(false),
      mDebugVisualRedrawRegions(false),
      mEGLImage(EGL_NO_IMAGE),
      mColorRBO(0),
      mPendingCommit(false) {
  LOG("DCLayerTree::DCLayerTree()");
}

DCLayerTree::~DCLayerTree() {
  LOG("DCLayerTree::~DCLayerTree()");

  // Free the EGL surface and cached FBO/renderbuffer objects while the GL
  // context is still usable.
  ReleaseNativeCompositorResources();
}

void DCLayerTree::ReleaseNativeCompositorResources() {
  const auto gl = GetGLContext();

  DestroyEGLSurface();

  // Delete any cached FBO objects
  for (auto it = mFrameBuffers.begin(); it != mFrameBuffers.end(); ++it) {
    gl->fDeleteRenderbuffers(1, &it->depthRboId);
    gl->fDeleteFramebuffers(1, &it->fboId);
  }
}

// Builds the DirectComposition target/visual tree for |aHwnd| and probes
// video-overlay support. On failure, fills |aError| with a diagnostic string
// (including the failing HRESULT) and returns false.
bool DCLayerTree::Initialize(HWND aHwnd, nsACString& aError) {
  HRESULT hr;

  RefPtr<IDCompositionDesktopDevice> desktopDevice;
  hr = mCompositionDevice->QueryInterface(
      (IDCompositionDesktopDevice**)getter_AddRefs(desktopDevice));
  if (FAILED(hr)) {
    aError.Assign(nsPrintfCString(
        "DCLayerTree(get IDCompositionDesktopDevice failed %lx)", hr));
    return false;
  }

  // TRUE selects the window's topmost visual layer as the target.
  hr = desktopDevice->CreateTargetForHwnd(aHwnd, TRUE,
                                          getter_AddRefs(mCompositionTarget));
  if (FAILED(hr)) {
    aError.Assign(nsPrintfCString(
        "DCLayerTree(create DCompositionTarget failed %lx)", hr));
    return false;
  }

  hr = mCompositionDevice->CreateVisual(getter_AddRefs(mRootVisual));
  if (FAILED(hr)) {
    aError.Assign(nsPrintfCString(
        "DCLayerTree(create root DCompositionVisual failed %lx)", hr));
    return false;
  }

  hr =
      mCompositionDevice->CreateVisual(getter_AddRefs(mDefaultSwapChainVisual));
  if (FAILED(hr)) {
    aError.Assign(nsPrintfCString(
        "DCLayerTree(create swap chain DCompositionVisual failed %lx)", hr));
    return false;
  }

  // Overlay probing is only attempted when a video-overlay path is enabled;
  // failure is reported to the render thread rather than failing init.
  if (gfx::gfxVars::UseWebRenderDCompVideoHwOverlayWin() ||
      gfx::gfxVars::UseWebRenderDCompVideoSwOverlayWin()) {
    if (!InitializeVideoOverlaySupport()) {
      RenderThread::Get()->HandleWebRenderError(WebRenderError::VIDEO_OVERLAY);
    }
  }
  if (!sGpuOverlayInfo) {
    // Set default if sGpuOverlayInfo was not set.
    sGpuOverlayInfo = new GpuOverlayInfo();
  }

  // Initialize SwapChainInfo
  SupportsSwapChainTearing();

  mCompositionTarget->SetRoot(mRootVisual);
  // Set interpolation mode to nearest, to ensure 1:1 sampling.
  // By default, a visual inherits the interpolation mode of the parent visual.
  // If no visuals set the interpolation mode, the default for the entire visual
  // tree is nearest neighbor interpolation.
  mRootVisual->SetBitmapInterpolationMode(
      DCOMPOSITION_BITMAP_INTERPOLATION_MODE_NEAREST_NEIGHBOR);
  return true;
}

bool FlagsSupportsOverlays(UINT flags) {
  return (flags & (DXGI_OVERLAY_SUPPORT_FLAG_DIRECT |
                   DXGI_OVERLAY_SUPPORT_FLAG_SCALING));
}

// A warpper of IDXGIOutput4::CheckOverlayColorSpaceSupport()
bool CheckOverlayColorSpaceSupport(DXGI_FORMAT aDxgiFormat,
                                   DXGI_COLOR_SPACE_TYPE aDxgiColorSpace,
                                   RefPtr<IDXGIOutput> aOutput,
                                   RefPtr<ID3D11Device> aD3d11Device) {
  UINT colorSpaceSupportFlags = 0;
  RefPtr<IDXGIOutput4> output4;

  if (FAILED(aOutput->QueryInterface(__uuidof(IDXGIOutput4),
                                     getter_AddRefs(output4)))) {
    return false;
  }

  if (FAILED(output4->CheckOverlayColorSpaceSupport(
          aDxgiFormat, aDxgiColorSpace, aD3d11Device,
          &colorSpaceSupportFlags))) {
    return false;
  }

  return (colorSpaceSupportFlags &
          DXGI_OVERLAY_COLOR_SPACE_SUPPORT_FLAG_PRESENT);
}

// Probes hardware overlay capabilities (NV12/YUY2/BGRA8/RGB10A2), plus
// NVIDIA VpSuperResolution and VpAutoHDR support, and caches the result in
// the process-wide sGpuOverlayInfo. Returns false only when the D3D11 video
// device/context cannot be obtained.
bool DCLayerTree::InitializeVideoOverlaySupport() {
  MOZ_ASSERT(IsWin10AnniversaryUpdateOrLater());

  HRESULT hr;

  hr = mDevice->QueryInterface(
      (ID3D11VideoDevice**)getter_AddRefs(mVideoDevice));
  if (FAILED(hr)) {
    gfxCriticalNote << "Failed to get D3D11VideoDevice: " << gfx::hexa(hr);
    return false;
  }

  hr =
      mCtx->QueryInterface((ID3D11VideoContext**)getter_AddRefs(mVideoContext));
  if (FAILED(hr)) {
    gfxCriticalNote << "Failed to get D3D11VideoContext: " << gfx::hexa(hr);
    return false;
  }

  // The overlay info is process-wide; only the first tree computes it.
  if (sGpuOverlayInfo) {
    return true;
  }

  UniquePtr<GpuOverlayInfo> info = MakeUnique<GpuOverlayInfo>();

  // NOTE(review): these two COM calls are unchecked; a failure would leave
  // |adapter| null and crash below — confirm this cannot fail in practice.
  RefPtr<IDXGIDevice> dxgiDevice;
  RefPtr<IDXGIAdapter> adapter;
  mDevice->QueryInterface((IDXGIDevice**)getter_AddRefs(dxgiDevice));
  dxgiDevice->GetAdapter(getter_AddRefs(adapter));

  unsigned int i = 0;
  while (true) {
    RefPtr<IDXGIOutput> output;
    if (FAILED(adapter->EnumOutputs(i++, getter_AddRefs(output)))) {
      break;
    }
    RefPtr<IDXGIOutput3> output3;
    if (FAILED(output->QueryInterface(__uuidof(IDXGIOutput3),
                                      getter_AddRefs(output3)))) {
      break;
    }

    output3->CheckOverlaySupport(DXGI_FORMAT_NV12, mDevice,
                                 &info->mNv12OverlaySupportFlags);
    output3->CheckOverlaySupport(DXGI_FORMAT_YUY2, mDevice,
                                 &info->mYuy2OverlaySupportFlags);
    output3->CheckOverlaySupport(DXGI_FORMAT_B8G8R8A8_UNORM, mDevice,
                                 &info->mBgra8OverlaySupportFlags);
    output3->CheckOverlaySupport(DXGI_FORMAT_R10G10B10A2_UNORM, mDevice,
                                 &info->mRgb10a2OverlaySupportFlags);

    if (FlagsSupportsOverlays(info->mNv12OverlaySupportFlags)) {
      // NV12 format is preferred if it's supported.
      info->mOverlayFormatUsed = DXGI_FORMAT_NV12;
      info->mSupportsHardwareOverlays = true;
    }

    if (!info->mSupportsHardwareOverlays &&
        FlagsSupportsOverlays(info->mYuy2OverlaySupportFlags)) {
      // If NV12 isn't supported, fallback to YUY2 if it's supported.
      info->mOverlayFormatUsed = DXGI_FORMAT_YUY2;
      info->mSupportsHardwareOverlays = true;
    }

    // RGB10A2 overlay is used for displaying HDR content. In Intel's
    // platform, RGB10A2 overlay is enabled only when
    // DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020 is supported.
    if (FlagsSupportsOverlays(info->mRgb10a2OverlaySupportFlags)) {
      if (!CheckOverlayColorSpaceSupport(
              DXGI_FORMAT_R10G10B10A2_UNORM,
              DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020, output, mDevice))
        info->mRgb10a2OverlaySupportFlags = 0;
    }

    // Early out after the first output that reports overlay support. All
    // outputs are expected to report the same overlay support according to
    // Microsoft's WDDM documentation:
    // https://docs.microsoft.com/en-us/windows-hardware/drivers/display/multiplane-overlay-hardware-requirements
    if (info->mSupportsHardwareOverlays) {
      break;
    }
  }

  // Pref override: force BGRA8 (no YUV hardware overlays).
  if (!StaticPrefs::gfx_webrender_dcomp_video_yuv_overlay_win_AtStartup()) {
    info->mOverlayFormatUsed = DXGI_FORMAT_B8G8R8A8_UNORM;
    info->mSupportsHardwareOverlays = false;
  }

  info->mSupportsOverlays = info->mSupportsHardwareOverlays;

  // Check VpSuperResolution and VpAutoHDR support.
  const auto size = gfx::IntSize(100, 100);
  if (EnsureVideoProcessor(size, size)) {
    const UINT vendorId = GetVendorId(mVideoDevice);
    if (vendorId == 0x10DE) {
      auto res = GetNvidiaVpSuperResolutionInfo(mVideoContext, mVideoProcessor);
      if (res.isOk() && res.unwrap().vsrGPUisVSRCapable) {
        info->mSupportsVpSuperResolution = true;
      }
    }

    const bool driverSupportVpAutoHDR =
        GetVpAutoHDRSupported(vendorId, mVideoContext, mVideoProcessor);
    if (driverSupportVpAutoHDR) {
      info->mSupportsVpAutoHDR = true;
    }
  }

  // Note: "UniquePtr::release" here is saying "release your ownership stake
  // on your pointer, so that our StaticAutoPtr can take over ownership".
  // (StaticAutoPtr doesn't have a move constructor that could directly steal
  // the contents of a UniquePtr via std::move().)
  sGpuOverlayInfo = info.release();

  if (auto* gpuParent = gfx::GPUParent::GetSingleton()) {
    gpuParent->NotifyOverlayInfo(GetOverlayInfo());
  }

  return true;
}

// Looks up a surface by id. The surface must exist; a missing id is a
// release-mode crash rather than a null return.
DCSurface* DCLayerTree::GetSurface(wr::NativeSurfaceId aId) const {
  const auto it = mDCSurfaces.find(aId);
  MOZ_RELEASE_ASSERT(it != mDCSurfaces.end());
  return it->second.get();
}

// Attaches the default swap chain's content to its visual and schedules a
// commit of the visual tree.
void DCLayerTree::SetDefaultSwapChain(IDXGISwapChain1* aSwapChain) {
  LOG("DCLayerTree::SetDefaultSwapChain()");

  mRootVisual->AddVisual(mDefaultSwapChainVisual, TRUE, nullptr);
  mDefaultSwapChainVisual->SetContent(aSwapChain);
  // Default SwapChain's visual does not need linear interpolation.
  mDefaultSwapChainVisual->SetBitmapInterpolationMode(
      DCOMPOSITION_BITMAP_INTERPOLATION_MODE_NEAREST_NEIGHBOR);
  // Actual Commit() happens later in MaybeCommit().
  mPendingCommit = true;
}

void DCLayerTree::MaybeUpdateDebug() {
  bool updated = false;
  updated |= MaybeUpdateDebugCounter();
  updated |= MaybeUpdateDebugVisualRedrawRegions();
  if (updated) {
    mPendingCommit = true;
  }
}

// Commits the DirectComposition visual tree if a change is pending.
// NOTE(review): mPendingCommit is not reset to false after Commit(), so
// subsequent calls re-commit until something clears it — confirm this is
// intentional.
void DCLayerTree::MaybeCommit() {
  if (!mPendingCommit) {
    return;
  }
  mCompositionDevice->Commit();
}

void DCLayerTree::WaitForCommitCompletion() {
  // To ensure that swapchain layers have presented to the screen
  // for capture, call present twice. This is less than ideal, but
  // I'm not sure if there is a better way to ensure this syncs
  // correctly that works on both Win10/11. Even though this can
  // be slower than necessary, it's only used by the reftest
  // screenshotting code, so isn't particularly perf sensitive.
  for (auto it = mDCSurfaces.begin(); it != mDCSurfaces.end(); it++) {
    auto* surface = it->second->AsDCSwapChain();
    if (surface) {
      surface->Present();
      surface->Present();
    }
  }

  mCompositionDevice->WaitForCommitCompletion();
}

// Tears down native-compositor state (EGL surface, cached FBOs, visual
// tree). Must only be called with no surface bound and no layers queued.
void DCLayerTree::DisableNativeCompositor() {
  MOZ_ASSERT(mCurrentSurface.isNothing());
  MOZ_ASSERT(mCurrentLayers.empty());

  ReleaseNativeCompositorResources();
  mPrevLayers.clear();
  mRootVisual->RemoveAllVisuals();
}

bool DCLayerTree::MaybeUpdateDebugCounter() {
  bool debugCounter = StaticPrefs::gfx_webrender_debug_dcomp_counter();
  if (mDebugCounter == debugCounter) {
    return false;
  }

  RefPtr<IDCompositionDeviceDebug> debugDevice;
  HRESULT hr = mCompositionDevice->QueryInterface(
      (IDCompositionDeviceDebug**)getter_AddRefs(debugDevice));
  if (FAILED(hr)) {
    return false;
  }

  if (debugCounter) {
    debugDevice->EnableDebugCounters();
  } else {
    debugDevice->DisableDebugCounters();
  }

  mDebugCounter = debugCounter;
  return true;
}

bool DCLayerTree::MaybeUpdateDebugVisualRedrawRegions() {
  bool debugVisualRedrawRegions =
      StaticPrefs::gfx_webrender_debug_dcomp_redraw_regions();
  if (mDebugVisualRedrawRegions == debugVisualRedrawRegions) {
    return false;
  }

  RefPtr<IDCompositionVisualDebug> visualDebug;
  HRESULT hr = mRootVisual->QueryInterface(
      (IDCompositionVisualDebug**)getter_AddRefs(visualDebug));
  if (FAILED(hr)) {
    return false;
  }

  if (debugVisualRedrawRegions) {
    visualDebug->EnableRedrawRegions();
  } else {
    visualDebug->DisableRedrawRegions();
  }

  mDebugVisualRedrawRegions = debugVisualRedrawRegions;
  return true;
}

// Starts a new composition frame: bumps the frame counter (used for FBO
// cache aging) and resets the per-frame overlay usage tracking.
void DCLayerTree::CompositorBeginFrame() {
  mCurrentFrame++;
  mUsedOverlayTypesInFrame = DCompOverlayTypes::NO_OVERLAY;
}

// Ends a composition frame: rebuilds the visual tree if the surface list
// changed, commits, ages out stale FBOs, and optionally disables video
// overlays when commits are persistently slow.
void DCLayerTree::CompositorEndFrame() {
  auto start = TimeStamp::Now();
  // Check if the visual tree of surfaces is the same as last frame.
  bool same = mPrevLayers == mCurrentLayers;

  if (!same) {
    // If not, we need to rebuild the visual tree. Note that addition or
    // removal of tiles no longer needs to rebuild the main visual tree
    // here, since they are added as children of the surface visual.
    mRootVisual->RemoveAllVisuals();
  }

  for (auto it = mCurrentLayers.begin(); it != mCurrentLayers.end(); ++it) {
    auto surface_it = mDCSurfaces.find(*it);
    MOZ_RELEASE_ASSERT(surface_it != mDCSurfaces.end());
    const auto surface = surface_it->second.get();
    // Ensure surface is trimmed to updated tile valid rects
    surface->UpdateAllocatedRect();
    if (!same) {
      // Add surfaces in z-order they were added to the scene.
      const auto visual = surface->GetVisual();
      mRootVisual->AddVisual(visual, false, nullptr);
    }
  }

  mPrevLayers.swap(mCurrentLayers);
  mCurrentLayers.clear();

  mCompositionDevice->Commit();

  auto end = TimeStamp::Now();
  // Telemetry value is recorded in tenths of a millisecond.
  mozilla::Telemetry::Accumulate(mozilla::Telemetry::COMPOSITE_SWAP_TIME,
                                 (end - start).ToMilliseconds() * 10.);

  // Remove any framebuffers that haven't been
  // used in the last 60 frames.
  //
  // This should use nsTArray::RemoveElementsBy once
  // CachedFrameBuffer is able to properly destroy
  // itself in the destructor.
  const auto gl = GetGLContext();
  for (uint32_t i = 0, len = mFrameBuffers.Length(); i < len; ++i) {
    auto& fb = mFrameBuffers[i];
    if ((mCurrentFrame - fb.lastFrameUsed) > 60) {
      gl->fDeleteRenderbuffers(1, &fb.depthRboId);
      gl->fDeleteFramebuffers(1, &fb.fboId);
      mFrameBuffers.UnorderedRemoveElementAt(i);
      --i;  // Examine the element again, if necessary.
      --len;
    }
  }

  if (!StaticPrefs::gfx_webrender_dcomp_video_check_slow_present()) {
    return;
  }

  // Disable video overlay if mCompositionDevice->Commit() with video overlay is
  // too slow. It drops fps.

  const auto maxCommitWaitDurationMs = 20;
  const auto maxSlowCommitCount = 5;
  const auto commitDurationMs =
      static_cast<uint32_t>((end - start).ToMilliseconds());

  nsPrintfCString marker("CommitWait overlay %u %ums ",
                         (uint8_t)mUsedOverlayTypesInFrame, commitDurationMs);
  PROFILER_MARKER_TEXT("CommitWait", GRAPHICS, {}, marker);

  // Count consecutive slow commits that involved an overlay; any fast (or
  // overlay-free) commit resets the streak.
  if (mUsedOverlayTypesInFrame != DCompOverlayTypes::NO_OVERLAY &&
      commitDurationMs > maxCommitWaitDurationMs) {
    mSlowCommitCount++;
  } else {
    mSlowCommitCount = 0;
  }

  if (mSlowCommitCount <= maxSlowCommitCount) {
    return;
  }

  // Too many consecutive slow commits: turn off the overlay path on every
  // video surface.
  for (auto it = mDCSurfaces.begin(); it != mDCSurfaces.end(); it++) {
    auto* surfaceVideo = it->second->AsDCSurfaceVideo();
    if (surfaceVideo) {
      surfaceVideo->DisableVideoOverlay();
    }
  }

  if (mUsedOverlayTypesInFrame & DCompOverlayTypes::SOFTWARE_DECODED_VIDEO) {
    gfxCriticalNoteOnce << "Sw video swapchain present is slow";

    nsPrintfCString marker("Sw video swapchain present is slow");
    PROFILER_MARKER_TEXT("DisableOverlay", GRAPHICS, {}, marker);
  }
  if (mUsedOverlayTypesInFrame & DCompOverlayTypes::HARDWARE_DECODED_VIDEO) {
    gfxCriticalNoteOnce << "Hw video swapchain present is slow";

    nsPrintfCString marker("Hw video swapchain present is slow");
    PROFILER_MARKER_TEXT("DisableOverlay", GRAPHICS, {}, marker);
  }
}

void DCLayerTree::BindSwapChain(wr::NativeSurfaceId aId) {
  auto surface = GetSurface(aId);
  surface->AsDCSwapChain()->Bind();
}

void DCLayerTree::PresentSwapChain(wr::NativeSurfaceId aId) {
  auto surface = GetSurface(aId);
  surface->AsDCSwapChain()->Present();
}

// Binds a tile of a surface for drawing: resolves the dcomp surface to draw
// into (virtual vs per-tile), expands the dirty rect on first draw, and
// creates an EGL surface/FBO over it. Outputs the FBO id and draw offset.
void DCLayerTree::Bind(wr::NativeTileId aId, wr::DeviceIntPoint* aOffset,
                       uint32_t* aFboId, wr::DeviceIntRect aDirtyRect,
                       wr::DeviceIntRect aValidRect) {
  auto surface = GetSurface(aId.surface_id);
  auto tile = surface->GetTile(aId.x, aId.y);
  wr::DeviceIntPoint targetOffset{0, 0};

  // If tile owns an IDCompositionSurface we use it, otherwise we're using an
  // IDCompositionVirtualSurface owned by the DCSurface.
  RefPtr<IDCompositionSurface> compositionSurface;
  if (surface->mIsVirtualSurface) {
    gfx::IntRect validRect(aValidRect.min.x, aValidRect.min.y,
                           aValidRect.width(), aValidRect.height());
    // A changed valid rect requires re-trimming the virtual surface.
    if (!tile->mValidRect.IsEqualEdges(validRect)) {
      tile->mValidRect = validRect;
      surface->DirtyAllocatedRect();
    }
    wr::DeviceIntSize tileSize = surface->GetTileSize();
    compositionSurface = surface->GetCompositionSurface();
    wr::DeviceIntPoint virtualOffset = surface->GetVirtualOffset();
    // Position of this tile within the shared virtual surface.
    targetOffset.x = virtualOffset.x + tileSize.width * aId.x;
    targetOffset.y = virtualOffset.y + tileSize.height * aId.y;
  } else {
    compositionSurface = tile->Bind(aValidRect);
  }

  if (tile->mNeedsFullDraw) {
    // dcomp requires that the first BeginDraw on a non-virtual surface is the
    // full size of the pixel buffer.
    auto tileSize = surface->GetTileSize();
    aDirtyRect.min.x = 0;
    aDirtyRect.min.y = 0;
    aDirtyRect.max.x = tileSize.width;
    aDirtyRect.max.y = tileSize.height;
    tile->mNeedsFullDraw = false;
  }

  *aFboId = CreateEGLSurfaceForCompositionSurface(
      aDirtyRect, aOffset, compositionSurface, targetOffset);
  // Remember the bound surface so Unbind() can EndDraw() it.
  mCurrentSurface = Some(compositionSurface);
}

void DCLayerTree::Unbind() {
  if (mCurrentSurface.isNothing()) {
    return;
  }

  RefPtr<IDCompositionSurface> surface = mCurrentSurface.ref();
  surface->EndDraw();

  DestroyEGLSurface();
  mCurrentSurface = Nothing();
}

void DCLayerTree::CreateSurface(wr::NativeSurfaceId aId,
                                wr::DeviceIntPoint aVirtualOffset,
                                wr::DeviceIntSize aTileSize, bool aIsOpaque) {
  auto it = mDCSurfaces.find(aId);
  MOZ_RELEASE_ASSERT(it == mDCSurfaces.end());
  if (it != mDCSurfaces.end()) {
    // DCSurface already exists.
    return;
  }

  // Tile size needs to be positive.
  if (aTileSize.width <= 0 || aTileSize.height <= 0) {
    gfxCriticalNote << "TileSize is not positive aId: " << wr::AsUint64(aId)
                    << " aTileSize(" << aTileSize.width << ","
                    << aTileSize.height << ")";
  }

  bool isVirtualSurface =
      StaticPrefs::gfx_webrender_dcomp_use_virtual_surfaces_AtStartup();
  auto surface = MakeUnique<DCSurface>(aTileSize, aVirtualOffset,
                                       isVirtualSurface, aIsOpaque, this);
  if (!surface->Initialize()) {
    gfxCriticalNote << "Failed to initialize DCSurface: " << wr::AsUint64(aId);
    return;
  }

  mDCSurfaces[aId] = std::move(surface);
}

void DCLayerTree::CreateSwapChainSurface(wr::NativeSurfaceId aId,
                                         wr::DeviceIntSize aSize,
                                         bool aIsOpaque) {
  auto it = mDCSurfaces.find(aId);
  MOZ_RELEASE_ASSERT(it == mDCSurfaces.end());

  auto surface = MakeUnique<DCSwapChain>(aSize, aIsOpaque, this);
  if (!surface->Initialize()) {
    gfxCriticalNote << "Failed to initialize DCSwapChain: "
                    << wr::AsUint64(aId);
    return;
  }

  mDCSurfaces[aId] = std::move(surface);
}

void DCLayerTree::ResizeSwapChainSurface(wr::NativeSurfaceId aId,
                                         wr::DeviceIntSize aSize) {
  auto it = mDCSurfaces.find(aId);
  MOZ_RELEASE_ASSERT(it != mDCSurfaces.end());
  auto surface = it->second.get();

  surface->AsDCSwapChain()->Resize(aSize);
}

void DCLayerTree::CreateExternalSurface(wr::NativeSurfaceId aId,
                                        bool aIsOpaque) {
  auto it = mDCSurfaces.find(aId);
  MOZ_RELEASE_ASSERT(it == mDCSurfaces.end());

  auto surface = MakeUnique<DCExternalSurfaceWrapper>(aIsOpaque, this);
  if (!surface->Initialize()) {
    gfxCriticalNote << "Failed to initialize DCExternalSurfaceWrapper: "
                    << wr::AsUint64(aId);
    return;
  }

  mDCSurfaces[aId] = std::move(surface);
}

void DCLayerTree::DestroySurface(NativeSurfaceId aId) {
  auto surface_it = mDCSurfaces.find(aId);
  MOZ_RELEASE_ASSERT(surface_it != mDCSurfaces.end());
  auto surface = surface_it->second.get();

  mRootVisual->RemoveVisual(surface->GetVisual());
  mDCSurfaces.erase(surface_it);
}

void DCLayerTree::CreateTile(wr::NativeSurfaceId aId, int32_t aX, int32_t aY) {
  auto surface = GetSurface(aId);
  surface->CreateTile(aX, aY);
}

void DCLayerTree::DestroyTile(wr::NativeSurfaceId aId, int32_t aX, int32_t aY) {
  auto surface = GetSurface(aId);
  surface->DestroyTile(aX, aY);
}

void DCLayerTree::AttachExternalImage(wr::NativeSurfaceId aId,
                                      wr::ExternalImageId aExternalImage) {
  auto surface_it = mDCSurfaces.find(aId);
  MOZ_RELEASE_ASSERT(surface_it != mDCSurfaces.end());
  surface_it->second->AttachExternalImage(aExternalImage);
}

void DCExternalSurfaceWrapper::AttachExternalImage(
    wr::ExternalImageId aExternalImage) {
  if (auto* surface = EnsureSurfaceForExternalImage(aExternalImage)) {
    surface->AttachExternalImage(aExternalImage);
  }
}

// Small COM QueryInterface helper: QI<IFoo>::From(ptr) returns a RefPtr to
// the requested interface, or nullptr when the object does not implement it.
// The HRESULT from QueryInterface is intentionally discarded; callers check
// the returned pointer instead.
template <class ToT>
struct QI {
  template <class FromT>
  [[nodiscard]] static inline RefPtr<ToT> From(FromT* const from) {
    RefPtr<ToT> to;
    (void)from->QueryInterface(static_cast<ToT**>(getter_AddRefs(to)));
    return to;
  }
};

// Lazily creates and caches the concrete DCSurface backing this wrapper,
// chosen from the external image's texture type: a DCSurfaceVideo for DXGI
// texture hosts, a DCSurfaceHandle for dcomp-surface texture hosts. Returns
// nullptr (after logging) for unsupported or failed cases. On first success
// the surface's visual is attached under this wrapper's visual, and a
// DirectComposition color-management filter chain may be installed on it.
DCSurface* DCExternalSurfaceWrapper::EnsureSurfaceForExternalImage(
    wr::ExternalImageId aExternalImage) {
  // Already created on a previous call; reuse it.
  if (mSurface) {
    return mSurface.get();
  }

  // Create a new surface based on the texture type.
  RenderTextureHost* texture =
      RenderThread::Get()->GetRenderTexture(aExternalImage);
  if (texture && texture->AsRenderDXGITextureHost()) {
    mSurface.reset(new DCSurfaceVideo(mIsOpaque, mDCLayerTree));
    if (!mSurface->Initialize()) {
      gfxCriticalNote << "Failed to initialize DCSurfaceVideo: "
                      << wr::AsUint64(aExternalImage);
      mSurface = nullptr;
    }
  } else if (texture && texture->AsRenderDcompSurfaceTextureHost()) {
    mSurface.reset(new DCSurfaceHandle(mIsOpaque, mDCLayerTree));
    if (!mSurface->Initialize()) {
      gfxCriticalNote << "Failed to initialize DCSurfaceHandle: "
                      << wr::AsUint64(aExternalImage);
      mSurface = nullptr;
    }
  }
  if (!mSurface) {
    gfxCriticalNote << "Failed to create a surface for external image: "
                    << gfx::hexa(texture);
    return nullptr;
  }

  // Add surface's visual which will contain video data to our root visual.
  const auto surfaceVisual = mSurface->GetVisual();
  mVisual->AddVisual(surfaceVisual, true, nullptr);

  // -
  // Apply color management.

  // Immediately-invoked lambda so any unsupported/disabled case can bail out
  // with `return` without skipping the final `return mSurface.get()`.
  [&]() {
    if (!StaticPrefs::gfx_webrender_dcomp_color_manage_with_filters()) return;

    const auto cmsMode = GfxColorManagementMode();
    if (cmsMode == CMSMode::Off) return;

    // Color management via dcomp filter effects needs IDCompositionDevice3.
    const auto dcomp = mDCLayerTree->GetCompositionDevice();
    const auto dcomp3 = QI<IDCompositionDevice3>::From(dcomp);
    if (!dcomp3) {
      NS_WARNING(
          "No IDCompositionDevice3, cannot use dcomp for color management.");
      return;
    }

    // -

    // Pick the source color space. `Display` normally means "leave as-is",
    // but when CMS is forced for all content, treat it as sRGB.
    const auto cspace = [&]() {
      const auto rangedCspace = texture->GetYUVColorSpace();
      const auto info = FromYUVRangedColorSpace(rangedCspace);
      auto ret = ToColorSpace2(info.space);
      if (ret == gfx::ColorSpace2::Display && cmsMode == CMSMode::All) {
        ret = gfx::ColorSpace2::SRGB;
      }
      return ret;
    }();

    const bool rec709GammaAsSrgb =
        StaticPrefs::gfx_color_management_rec709_gamma_as_srgb();
    const bool rec2020GammaAsRec709 =
        StaticPrefs::gfx_color_management_rec2020_gamma_as_rec709();

    // Translate the source space into chromaticities + transfer function,
    // honoring the gamma-approximation prefs above.
    auto cspaceDesc = color::ColorspaceDesc{};
    switch (cspace) {
      case gfx::ColorSpace2::Display:
        return;  // No color management needed!
      case gfx::ColorSpace2::SRGB:
        cspaceDesc.chrom = color::Chromaticities::Srgb();
        cspaceDesc.tf = color::PiecewiseGammaDesc::Srgb();
        break;

      case gfx::ColorSpace2::DISPLAY_P3:
        cspaceDesc.chrom = color::Chromaticities::DisplayP3();
        cspaceDesc.tf = color::PiecewiseGammaDesc::DisplayP3();
        break;

      case gfx::ColorSpace2::BT601_525:
        cspaceDesc.chrom = color::Chromaticities::Rec601_525_Ntsc();
        if (rec709GammaAsSrgb) {
          cspaceDesc.tf = color::PiecewiseGammaDesc::Srgb();
        } else {
          cspaceDesc.tf = color::PiecewiseGammaDesc::Rec709();
        }
        break;

      case gfx::ColorSpace2::BT709:
        cspaceDesc.chrom = color::Chromaticities::Rec709();
        if (rec709GammaAsSrgb) {
          cspaceDesc.tf = color::PiecewiseGammaDesc::Srgb();
        } else {
          cspaceDesc.tf = color::PiecewiseGammaDesc::Rec709();
        }
        break;

      case gfx::ColorSpace2::BT2020:
        cspaceDesc.chrom = color::Chromaticities::Rec2020();
        if (rec2020GammaAsRec709 && rec709GammaAsSrgb) {
          cspaceDesc.tf = color::PiecewiseGammaDesc::Srgb();
        } else if (rec2020GammaAsRec709) {
          cspaceDesc.tf = color::PiecewiseGammaDesc::Rec709();
        } else {
          // Just Rec709 with slightly more precision.
          cspaceDesc.tf = color::PiecewiseGammaDesc::Rec2020_12bit();
        }
        break;
    }

    const auto cprofileIn = color::ColorProfileDesc::From(cspaceDesc);
    auto cprofileOut = mDCLayerTree->OutputColorProfile();
    // Currently always convert into sRGB rather than the real output
    // profile (pretendSrgb is hard-coded to true).
    bool pretendSrgb = true;
    if (pretendSrgb) {
      cprofileOut = color::ColorProfileDesc::From(color::ColorspaceDesc{
          .chrom = color::Chromaticities::Srgb(),
          .tf = color::PiecewiseGammaDesc::Srgb(),
      });
    }
    const auto conversion = color::ColorProfileConversionDesc::From({
        .src = cprofileIn,
        .dst = cprofileOut,
    });

    // -

    // Build the dcomp effect chain and attach it to the surface's visual.
    auto chain = ColorManagementChain::From(*dcomp3, conversion);
    mCManageChain = Some(chain);

    surfaceVisual->SetEffect(mCManageChain->last.get());
  }();

  return mSurface.get();
}

void DCExternalSurfaceWrapper::PresentExternalSurface(gfx::Matrix& aTransform) {
  MOZ_ASSERT(mSurface);
  if (auto* surface = mSurface->AsDCSurfaceVideo()) {
    if (surface->CalculateSwapChainSize(aTransform)) {
      surface->PresentVideo();
    }
  } else if (auto* surface = mSurface->AsDCSurfaceHandle()) {
    surface->PresentSurfaceHandle();
  }
}

template <typename T>
static inline D2D1_RECT_F D2DRect(const T& aRect) {
  return D2D1::RectF(aRect.X(), aRect.Y(), aRect.XMost(), aRect.YMost());
}

static inline D2D1_MATRIX_3X2_F D2DMatrix(const gfx::Matrix& aTransform) {
  return D2D1::Matrix3x2F(aTransform._11, aTransform._12, aTransform._21,
                          aTransform._22, aTransform._31, aTransform._32);
}

// Positions, clips and filters an existing surface's visual for the current
// frame, then records it in mCurrentLayers. The compositor transform only
// carries scale and offset components here (the matrix built below has no
// rotation terms).
void DCLayerTree::AddSurface(wr::NativeSurfaceId aId,
                             const wr::CompositorSurfaceTransform& aTransform,
                             wr::DeviceIntRect aClipRect,
                             wr::ImageRendering aImageRendering) {
  auto it = mDCSurfaces.find(aId);
  MOZ_RELEASE_ASSERT(it != mDCSurfaces.end());
  const auto surface = it->second.get();
  const auto visual = surface->GetVisual();

  wr::DeviceIntPoint virtualOffset = surface->GetVirtualOffset();

  // Build a 2D scale+translate matrix from the compositor transform.
  float sx = aTransform.scale.x;
  float sy = aTransform.scale.y;
  float tx = aTransform.offset.x;
  float ty = aTransform.offset.y;
  gfx::Matrix transform(sx, 0.0, 0.0, sy, tx, ty);

  // For external surfaces (video), this may rewrite `transform` - e.g. when
  // the video processor performs the scaling itself.
  surface->PresentExternalSurface(transform);

  // Compensate for the virtual surface's coordinate origin.
  transform.PreTranslate(-virtualOffset.x, -virtualOffset.y);

  // The DirectComposition API applies clipping *before* any
  // transforms/offset, whereas we want the clip applied after. Right now, we
  // only support rectilinear transforms, and then we transform our clip into
  // pre-transform coordinate space for it to be applied there.
  // DirectComposition does have an option for pre-transform clipping, if you
  // create an explicit IDCompositionEffectGroup object and set a 3D transform
  // on that. I suspect that will perform worse though, so we should only do
  // that for complex transforms (which are never provided right now).
  MOZ_ASSERT(transform.IsRectilinear());
  gfx::Rect clip = transform.Inverse().TransformBounds(gfx::Rect(
      aClipRect.min.x, aClipRect.min.y, aClipRect.width(), aClipRect.height()));
  // Set the clip rect - converting from world space to the pre-offset space
  // that DC requires for rectangle clips.
  visual->SetClip(D2DRect(clip));

  // TODO: The input matrix is a 4x4, but we only support a 3x2 at
  // the D3D API level (unless we QI to IDCompositionVisual3, which might
  // not be available?).
  // Should we assert here, or restrict at the WR API level.
  visual->SetTransform(D2DMatrix(transform));

  // Map WR's image-rendering hint onto DC's bitmap interpolation mode.
  if (aImageRendering == wr::ImageRendering::Auto) {
    visual->SetBitmapInterpolationMode(
        DCOMPOSITION_BITMAP_INTERPOLATION_MODE_LINEAR);
  } else {
    visual->SetBitmapInterpolationMode(
        DCOMPOSITION_BITMAP_INTERPOLATION_MODE_NEAREST_NEIGHBOR);
  }

  mCurrentLayers.push_back(aId);
}

GLuint DCLayerTree::GetOrCreateFbo(int aWidth, int aHeight) {
  const auto gl = GetGLContext();
  GLuint fboId = 0;

  // Check if we have a cached FBO with matching dimensions
  for (auto it = mFrameBuffers.begin(); it != mFrameBuffers.end(); ++it) {
    if (it->width == aWidth && it->height == aHeight) {
      fboId = it->fboId;
      it->lastFrameUsed = mCurrentFrame;
      break;
    }
  }

  // If not, create a new FBO with attached depth buffer
  if (fboId == 0) {
    // Create the depth buffer
    GLuint depthRboId;
    gl->fGenRenderbuffers(1, &depthRboId);
    gl->fBindRenderbuffer(LOCAL_GL_RENDERBUFFER, depthRboId);
    gl->fRenderbufferStorage(LOCAL_GL_RENDERBUFFER, LOCAL_GL_DEPTH_COMPONENT24,
                             aWidth, aHeight);

    // Create the framebuffer and attach the depth buffer to it
    gl->fGenFramebuffers(1, &fboId);
    gl->fBindFramebuffer(LOCAL_GL_DRAW_FRAMEBUFFER, fboId);
    gl->fFramebufferRenderbuffer(LOCAL_GL_DRAW_FRAMEBUFFER,
                                 LOCAL_GL_DEPTH_ATTACHMENT,
                                 LOCAL_GL_RENDERBUFFER, depthRboId);

    // Store this in the cache for future calls.
    // TODO(gw): Maybe we should periodically scan this list and remove old
    // entries that
    //           haven't been used for some time?
    DCLayerTree::CachedFrameBuffer frame_buffer_info;
    frame_buffer_info.width = aWidth;
    frame_buffer_info.height = aHeight;
    frame_buffer_info.fboId = fboId;
    frame_buffer_info.depthRboId = depthRboId;
    frame_buffer_info.lastFrameUsed = mCurrentFrame;
    mFrameBuffers.AppendElement(frame_buffer_info);
  }

  return fboId;
}

// Ensures the cached D3D11 video processor (and its enumerator) can handle
// at least the given input/output sizes, recreating both when the cached
// sizes are too small. Returns false when the video device/context are
// missing or creation fails; on failure the cached objects are cleared.
bool DCLayerTree::EnsureVideoProcessor(const gfx::IntSize& aInputSize,
                                       const gfx::IntSize& aOutputSize) {
  HRESULT hr;

  if (!mVideoDevice || !mVideoContext) {
    return false;
  }

  // The existing processor is reusable as long as it was created with sizes
  // at least as large as what is requested now.
  if (mVideoProcessor && (aInputSize <= mVideoInputSize) &&
      (aOutputSize <= mVideoOutputSize)) {
    return true;
  }

  mVideoProcessor = nullptr;
  mVideoProcessorEnumerator = nullptr;

  // Describe a progressive 60fps stream of the requested dimensions; the
  // frame rates are nominal values required by the content description.
  D3D11_VIDEO_PROCESSOR_CONTENT_DESC desc = {};
  desc.InputFrameFormat = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
  desc.InputFrameRate.Numerator = 60;
  desc.InputFrameRate.Denominator = 1;
  desc.InputWidth = aInputSize.width;
  desc.InputHeight = aInputSize.height;
  desc.OutputFrameRate.Numerator = 60;
  desc.OutputFrameRate.Denominator = 1;
  desc.OutputWidth = aOutputSize.width;
  desc.OutputHeight = aOutputSize.height;
  desc.Usage = D3D11_VIDEO_USAGE_PLAYBACK_NORMAL;

  hr = mVideoDevice->CreateVideoProcessorEnumerator(
      &desc, getter_AddRefs(mVideoProcessorEnumerator));
  if (FAILED(hr)) {
    gfxCriticalNote << "Failed to create VideoProcessorEnumerator: "
                    << gfx::hexa(hr);
    return false;
  }

  hr = mVideoDevice->CreateVideoProcessor(mVideoProcessorEnumerator, 0,
                                          getter_AddRefs(mVideoProcessor));
  if (FAILED(hr)) {
    mVideoProcessor = nullptr;
    mVideoProcessorEnumerator = nullptr;
    gfxCriticalNote << "Failed to create VideoProcessor: " << gfx::hexa(hr);
    return false;
  }

  // Reduce power consumption
  // By default, the driver might perform certain processing tasks
  // automatically
  mVideoContext->VideoProcessorSetStreamAutoProcessingMode(mVideoProcessor, 0,
                                                           FALSE);

  mVideoInputSize = aInputSize;
  mVideoOutputSize = aOutputSize;

  return true;
}

// Whether hardware overlays are usable, as recorded in the shared
// sGpuOverlayInfo snapshot.
bool DCLayerTree::SupportsHardwareOverlays() {
  return sGpuOverlayInfo->mSupportsHardwareOverlays;
}

// Queries whether DXGI allows tearing presents (DXGI_FEATURE_PRESENT_ALLOW_
// TEARING). The answer is computed once via a function-local static — the
// device captured on the *first* call decides the result for all later
// calls — and is also reported to the GPU parent process.
bool DCLayerTree::SupportsSwapChainTearing() {
  RefPtr<ID3D11Device> device = mDevice;
  static const bool supported = [device] {
    // Walk device -> adapter -> factory to reach CheckFeatureSupport.
    RefPtr<IDXGIDevice> dxgiDevice;
    RefPtr<IDXGIAdapter> adapter;
    device->QueryInterface((IDXGIDevice**)getter_AddRefs(dxgiDevice));
    dxgiDevice->GetAdapter(getter_AddRefs(adapter));

    // IDXGIFactory5 is required for the tearing feature query.
    RefPtr<IDXGIFactory5> dxgiFactory;
    HRESULT hr = adapter->GetParent(
        IID_PPV_ARGS((IDXGIFactory5**)getter_AddRefs(dxgiFactory)));
    if (FAILED(hr)) {
      return false;
    }

    BOOL presentAllowTearing = FALSE;
    hr = dxgiFactory->CheckFeatureSupport(DXGI_FEATURE_PRESENT_ALLOW_TEARING,
                                          &presentAllowTearing,
                                          sizeof(presentAllowTearing));
    if (FAILED(hr)) {
      return false;
    }

    // Report the capability to the GPU parent; in the parent process this
    // path is not expected to run at all.
    if (auto* gpuParent = gfx::GPUParent::GetSingleton()) {
      gpuParent->NotifySwapChainInfo(
          layers::SwapChainInfo(!!presentAllowTearing));
    } else if (XRE_IsParentProcess()) {
      MOZ_ASSERT_UNREACHABLE("unexpected to be called");
    }
    return !!presentAllowTearing;
  }();
  return supported;
}

// The DXGI format chosen for SDR overlay swap chains, as recorded in the
// shared sGpuOverlayInfo snapshot.
DXGI_FORMAT DCLayerTree::GetOverlayFormatForSDR() {
  return sGpuOverlayInfo->mOverlayFormatUsed;
}

static layers::OverlaySupportType FlagsToOverlaySupportType(
    UINT aFlags, bool aSoftwareOverlaySupported) {
  if (aFlags & DXGI_OVERLAY_SUPPORT_FLAG_SCALING) {
    return layers::OverlaySupportType::Scaling;
  }
  if (aFlags & DXGI_OVERLAY_SUPPORT_FLAG_DIRECT) {
    return layers::OverlaySupportType::Direct;
  }
  if (aSoftwareOverlaySupported) {
    return layers::OverlaySupportType::Software;
  }
  return layers::OverlaySupportType::None;
}

layers::OverlayInfo DCLayerTree::GetOverlayInfo() {
  layers::OverlayInfo info;

  info.mSupportsOverlays = sGpuOverlayInfo->mSupportsHardwareOverlays;
  info.mNv12Overlay =
      FlagsToOverlaySupportType(sGpuOverlayInfo->mNv12OverlaySupportFlags,
                                /* aSoftwareOverlaySupported */ false);
  info.mYuy2Overlay =
      FlagsToOverlaySupportType(sGpuOverlayInfo->mYuy2OverlaySupportFlags,
                                /* aSoftwareOverlaySupported */ false);
  info.mBgra8Overlay =
      FlagsToOverlaySupportType(sGpuOverlayInfo->mBgra8OverlaySupportFlags,
                                /* aSoftwareOverlaySupported */ true);
  info.mRgb10a2Overlay =
      FlagsToOverlaySupportType(sGpuOverlayInfo->mRgb10a2OverlaySupportFlags,
                                /* aSoftwareOverlaySupported */ false);

  info.mSupportsVpSuperResolution = sGpuOverlayInfo->mSupportsVpSuperResolution;
  info.mSupportsVpAutoHDR = sGpuOverlayInfo->mSupportsVpAutoHDR;

  return info;
}

// Accumulates the overlay types (hardware/software decoded video) used
// during the current frame into a bit-flag set.
void DCLayerTree::SetUsedOverlayTypeInFrame(DCompOverlayTypes aTypes) {
  mUsedOverlayTypesInFrame |= aTypes;
}

// aTileSize: dimensions of each tile attached to this surface.
// aVirtualOffset: origin offset applied within the (virtual) surface.
// aIsVirtualSurface: when true, tiles share one large dcomp virtual surface
//   instead of owning individual visuals/surfaces.
// The allocated rect starts dirty so the first UpdateAllocatedRect() trims.
DCSurface::DCSurface(wr::DeviceIntSize aTileSize,
                     wr::DeviceIntPoint aVirtualOffset, bool aIsVirtualSurface,
                     bool aIsOpaque, DCLayerTree* aDCLayerTree)
    : mIsVirtualSurface(aIsVirtualSurface),
      mDCLayerTree(aDCLayerTree),
      mTileSize(aTileSize),
      mIsOpaque(aIsOpaque),
      mAllocatedRectDirty(true),
      mVirtualOffset(aVirtualOffset) {}

DCSurface::~DCSurface() {}

// Creates the dcomp visual for this surface and, for virtual surfaces, a
// large shared virtual surface bound as the visual's content. Returns false
// only when visual creation fails; virtual-surface creation failures are
// asserted but not propagated.
bool DCSurface::Initialize() {
  // Create a visual for tiles to attach to, whether virtual or not.
  HRESULT hr;
  const auto dCompDevice = mDCLayerTree->GetCompositionDevice();
  hr = dCompDevice->CreateVisual(getter_AddRefs(mVisual));
  if (FAILED(hr)) {
    gfxCriticalNote << "Failed to create DCompositionVisual: " << gfx::hexa(hr);
    return false;
  }

  // If virtual surface is enabled, create and attach to visual, in this case
  // the tiles won't own visuals or surfaces.
  if (mIsVirtualSurface) {
    // Opaque surfaces ignore alpha; others expect premultiplied alpha.
    DXGI_ALPHA_MODE alpha_mode =
        mIsOpaque ? DXGI_ALPHA_MODE_IGNORE : DXGI_ALPHA_MODE_PREMULTIPLIED;

    hr = dCompDevice->CreateVirtualSurface(
        VIRTUAL_SURFACE_SIZE, VIRTUAL_SURFACE_SIZE, DXGI_FORMAT_R8G8B8A8_UNORM,
        alpha_mode, getter_AddRefs(mVirtualSurface));
    MOZ_ASSERT(SUCCEEDED(hr));

    // Bind the surface memory to this visual
    hr = mVisual->SetContent(mVirtualSurface);
    MOZ_ASSERT(SUCCEEDED(hr));
  }

  return true;
}

void DCSurface::CreateTile(int32_t aX, int32_t aY) {
  TileKey key(aX, aY);
  MOZ_RELEASE_ASSERT(mDCTiles.find(key) == mDCTiles.end());

  auto tile = MakeUnique<DCTile>(mDCLayerTree);
  if (!tile->Initialize(aX, aY, mTileSize, mIsVirtualSurface, mIsOpaque,
                        mVisual)) {
    gfxCriticalNote << "Failed to initialize DCTile: " << aX << aY;
    return;
  }

  if (mIsVirtualSurface) {
    mAllocatedRectDirty = true;
  } else {
    mVisual->AddVisual(tile->GetVisual(), false, nullptr);
  }

  mDCTiles[key] = std::move(tile);
}

void DCSurface::DestroyTile(int32_t aX, int32_t aY) {
  TileKey key(aX, aY);
  if (mIsVirtualSurface) {
    mAllocatedRectDirty = true;
  } else {
    auto tile = GetTile(aX, aY);
    mVisual->RemoveVisual(tile->GetVisual());
  }
  mDCTiles.erase(key);
}

void DCSurface::DirtyAllocatedRect() { mAllocatedRectDirty = true; }

void DCSurface::UpdateAllocatedRect() {
  if (mAllocatedRectDirty) {
    if (mVirtualSurface) {
      // The virtual surface may have holes in it (for example, an empty tile
      // that has no primitives). Instead of trimming to a single bounding
      // rect, supply the rect of each valid tile to handle this case.
      std::vector<RECT> validRects;

      for (auto it = mDCTiles.begin(); it != mDCTiles.end(); ++it) {
        auto tile = GetTile(it->first.mX, it->first.mY);
        RECT rect;

        rect.left = (LONG)(mVirtualOffset.x + it->first.mX * mTileSize.width +
                           tile->mValidRect.x);
        rect.top = (LONG)(mVirtualOffset.y + it->first.mY * mTileSize.height +
                          tile->mValidRect.y);
        rect.right = rect.left + tile->mValidRect.width;
        rect.bottom = rect.top + tile->mValidRect.height;

        validRects.push_back(rect);
      }

      mVirtualSurface->Trim(validRects.data(), validRects.size());
    }
    // When not using a virtual surface, we still want to reset this
    mAllocatedRectDirty = false;
  }
}

// Looks up the tile at (aX, aY); the tile must exist.
DCTile* DCSurface::GetTile(int32_t aX, int32_t aY) const {
  const auto it = mDCTiles.find(TileKey(aX, aY));
  MOZ_RELEASE_ASSERT(it != mDCTiles.end());
  return it->second.get();
}

// Destroys the EGL pbuffer surface wrapping the swap chain's back buffer,
// if one was created. The swap chain itself is released by its RefPtr.
DCSwapChain::~DCSwapChain() {
  if (mEGLSurface) {
    const auto gl = mDCLayerTree->GetGLContext();

    const auto& gle = gl::GLContextEGL::Cast(gl);
    const auto& egl = gle->mEgl;
    egl->fDestroySurface(mEGLSurface);
    mEGLSurface = EGL_NO_SURFACE;
  }
}

bool DCSwapChain::Initialize() {
  DCSurface::Initialize();

  const auto gl = mDCLayerTree->GetGLContext();
  const auto& gle = gl::GLContextEGL::Cast(gl);
  const auto& egl = gle->mEgl;

  HRESULT hr;
  auto device = mDCLayerTree->GetDevice();

  RefPtr<IDXGIDevice> dxgiDevice;
  device->QueryInterface((IDXGIDevice**)getter_AddRefs(dxgiDevice));

  RefPtr<IDXGIFactory2> dxgiFactory;
  {
    RefPtr<IDXGIAdapter> adapter;
    dxgiDevice->GetAdapter(getter_AddRefs(adapter));
    adapter->GetParent(
        IID_PPV_ARGS((IDXGIFactory2**)getter_AddRefs(dxgiFactory)));
  }

  DXGI_SWAP_CHAIN_DESC1 desc{};
  desc.Width = mSize.width;
  desc.Height = mSize.height;
  desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
  desc.SampleDesc.Count = 1;
  desc.SampleDesc.Quality = 0;
  desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
  desc.BufferCount = 2;
  // DXGI_SCALING_NONE caused swap chain creation failure.
  desc.Scaling = DXGI_SCALING_STRETCH;
  desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
  desc.AlphaMode =
      mIsOpaque ? DXGI_ALPHA_MODE_IGNORE : DXGI_ALPHA_MODE_PREMULTIPLIED;
  desc.Flags = 0;

  hr = dxgiFactory->CreateSwapChainForComposition(device, &desc, nullptr,
                                                  getter_AddRefs(mSwapChain));
  MOZ_RELEASE_ASSERT(SUCCEEDED(hr));
  mVisual->SetContent(mSwapChain);

  ID3D11Texture2D* backBuffer;
  hr = mSwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&backBuffer);
  MOZ_RELEASE_ASSERT(SUCCEEDED(hr));

  const EGLint pbuffer_attribs[]{LOCAL_EGL_WIDTH, mSize.width, LOCAL_EGL_HEIGHT,
                                 mSize.height, LOCAL_EGL_NONE};
  const auto buffer = reinterpret_cast<EGLClientBuffer>(backBuffer);
  EGLConfig eglConfig = mDCLayerTree->GetEGLConfig();

  mEGLSurface = egl->fCreatePbufferFromClientBuffer(
      LOCAL_EGL_D3D_TEXTURE_ANGLE, buffer, eglConfig, pbuffer_attribs);
  MOZ_RELEASE_ASSERT(mEGLSurface);
  backBuffer->Release();

  return true;
}

void DCSwapChain::Bind() {
  const auto gl = mDCLayerTree->GetGLContext();
  const auto& gle = gl::GLContextEGL::Cast(gl);

  gle->SetEGLSurfaceOverride(mEGLSurface);
  bool ok = gl->MakeCurrent();

  MOZ_RELEASE_ASSERT(ok);
}

// Resizes the swap chain's buffers to aSize and recreates the EGL pbuffer
// wrapping the new back buffer. The old pbuffer is destroyed first —
// ResizeBuffers requires outstanding buffer references to be released
// before it can succeed.
void DCSwapChain::Resize(wr::DeviceIntSize aSize) {
  const auto gl = mDCLayerTree->GetGLContext();

  const auto& gle = gl::GLContextEGL::Cast(gl);
  const auto& egl = gle->mEgl;

  // Release the EGL wrapper around the old back buffer before resizing.
  if (mEGLSurface) {
    egl->fDestroySurface(mEGLSurface);
    mEGLSurface = EGL_NO_SURFACE;
  }

  ID3D11Texture2D* backBuffer;
  DXGI_SWAP_CHAIN_DESC desc;
  HRESULT hr;

  // Keep the existing buffer count; only the dimensions change.
  hr = mSwapChain->GetDesc(&desc);
  MOZ_RELEASE_ASSERT(SUCCEEDED(hr));

  hr = mSwapChain->ResizeBuffers(desc.BufferCount, aSize.width, aSize.height,
                                 DXGI_FORMAT_B8G8R8A8_UNORM, 0);
  MOZ_RELEASE_ASSERT(SUCCEEDED(hr));

  // Re-wrap the (new) back buffer in an EGL pbuffer at the new size.
  hr = mSwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&backBuffer);
  MOZ_RELEASE_ASSERT(SUCCEEDED(hr));

  const EGLint pbuffer_attribs[]{LOCAL_EGL_WIDTH, aSize.width, LOCAL_EGL_HEIGHT,
                                 aSize.height, LOCAL_EGL_NONE};
  const auto buffer = reinterpret_cast<EGLClientBuffer>(backBuffer);
  EGLConfig eglConfig = mDCLayerTree->GetEGLConfig();

  mEGLSurface = egl->fCreatePbufferFromClientBuffer(
      LOCAL_EGL_D3D_TEXTURE_ANGLE, buffer, eglConfig, pbuffer_attribs);
  MOZ_RELEASE_ASSERT(mEGLSurface);

  // The pbuffer holds its own reference; drop the GetBuffer() one.
  backBuffer->Release();

  mSize = aSize;
}

void DCSwapChain::Present() {
  const auto gl = mDCLayerTree->GetGLContext();
  const auto& gle = gl::GLContextEGL::Cast(gl);

  HRESULT hr = mSwapChain->Present(0, 0);
  MOZ_RELEASE_ASSERT(SUCCEEDED(hr));

  gle->SetEGLSurfaceOverride(EGL_NO_SURFACE);
}

// Video overlay surface; never a virtual surface, with no tile size or
// virtual offset. The swap chain is triple-buffered when the corresponding
// pref is set, double-buffered otherwise.
DCSurfaceVideo::DCSurfaceVideo(bool aIsOpaque, DCLayerTree* aDCLayerTree)
    : DCSurface(wr::DeviceIntSize{}, wr::DeviceIntPoint{}, false, aIsOpaque,
                aDCLayerTree),
      mSwapChainBufferCount(
          StaticPrefs::gfx_webrender_dcomp_video_force_triple_buffering() ? 3
                                                                          : 2) {
}

// Releases the video swap chain and related resources; the surface handle
// is expected to have been cleaned up by that release.
DCSurfaceVideo::~DCSurfaceVideo() {
  ReleaseDecodeSwapChainResources();
  MOZ_ASSERT(!mSwapChainSurfaceHandle);
}

bool IsYUVSwapChainFormat(DXGI_FORMAT aFormat) {
  if (aFormat == DXGI_FORMAT_NV12 || aFormat == DXGI_FORMAT_YUY2) {
    return true;
  }
  return false;
}

// Records the texture backing this video surface. The usage-info object
// (used later to disable overlays on slow presents) is kept even when the
// texture itself is rejected. Only NV12 DXGI textures are accepted for the
// overlay path.
void DCSurfaceVideo::AttachExternalImage(wr::ExternalImageId aExternalImage) {
  auto [texture, usageInfo] =
      RenderThread::Get()->GetRenderTextureAndUsageInfo(aExternalImage);
  MOZ_RELEASE_ASSERT(texture);

  if (usageInfo) {
    mRenderTextureHostUsageInfo = usageInfo;
  }

  // Same texture as the last present; nothing to update.
  if (mPrevTexture == texture) {
    return;
  }

  // XXX if software decoded video frame format is nv12, it could be used as
  // video overlay.
  if (!texture || !texture->AsRenderDXGITextureHost() ||
      texture->GetFormat() != gfx::SurfaceFormat::NV12) {
    gfxCriticalNote << "Unsupported RenderTexture for overlay: "
                    << gfx::hexa(texture);
    return;
  }

  mRenderTextureHost = texture;
}

bool DCSurfaceVideo::CalculateSwapChainSize(gfx::Matrix& aTransform) {
  if (!mRenderTextureHost) {
    MOZ_ASSERT_UNREACHABLE("unexpected to be called");
    return false;
  }

  const auto overlayType = mRenderTextureHost->IsSoftwareDecodedVideo()
                               ? DCompOverlayTypes::SOFTWARE_DECODED_VIDEO
                               : DCompOverlayTypes::HARDWARE_DECODED_VIDEO;
  mDCLayerTree->SetUsedOverlayTypeInFrame(overlayType);

  mVideoSize = mRenderTextureHost->AsRenderDXGITextureHost()->GetSize(0);

  // When RenderTextureHost, swapChainSize or VideoSwapChain are updated,
  // DCSurfaceVideo::PresentVideo() needs to be called.
  bool needsToPresent = mPrevTexture != mRenderTextureHost;
  gfx::IntSize swapChainSize = mVideoSize;
  gfx::Matrix transform = aTransform;
  const bool isDRM = mRenderTextureHost->IsFromDRMSource();

  // When video is rendered to axis aligned integer rectangle, video scaling
  // could be done by VideoProcessor
  bool scaleVideoAtVideoProcessor = false;
  if (StaticPrefs::gfx_webrender_dcomp_video_vp_scaling_win_AtStartup() &&
      aTransform.PreservesAxisAlignedRectangles()) {
    gfx::Size scaledSize = gfx::Size(mVideoSize) * aTransform.ScaleFactors();
    gfx::IntSize size(int32_t(std::round(scaledSize.width)),
                      int32_t(std::round(scaledSize.height)));
    if (gfx::FuzzyEqual(scaledSize.width, size.width, 0.1f) &&
        gfx::FuzzyEqual(scaledSize.height, size.height, 0.1f)) {
      scaleVideoAtVideoProcessor = true;
      swapChainSize = size;
    }
  }

  if (scaleVideoAtVideoProcessor) {
    // 4:2:2 subsampled formats like YUY2 must have an even width, and 4:2:0
    // subsampled formats like NV12 must have an even width and height.
    if (swapChainSize.width % 2 == 1) {
      swapChainSize.width += 1;
    }
    if (swapChainSize.height % 2 == 1) {
      swapChainSize.height += 1;
    }
    transform = gfx::Matrix::Translation(aTransform.GetTranslation());
  }

  if (!mDCLayerTree->EnsureVideoProcessor(mVideoSize, swapChainSize)) {
    gfxCriticalNote << "EnsureVideoProcessor Failed";
    return false;
  }

  MOZ_ASSERT(mDCLayerTree->GetVideoContext());
  MOZ_ASSERT(mDCLayerTree->GetVideoProcessor());

  const UINT vendorId = GetVendorId(mDCLayerTree->GetVideoDevice());
  const bool driverSupportsAutoHDR =
      GetVpAutoHDRSupported(vendorId, mDCLayerTree->GetVideoContext(),
                            mDCLayerTree->GetVideoProcessor());
  const bool contentIsHDR = false;  // XXX for now, only non-HDR is supported.
  const bool monitorIsHDR =
      gfx::DeviceManagerDx::Get()->WindowHDREnabled(mDCLayerTree->GetHwnd());
  const bool powerIsCharging = RenderThread::Get()->GetPowerIsCharging();

  bool useVpAutoHDR = gfx::gfxVars::WebRenderOverlayVpAutoHDR() &&
                      !contentIsHDR && monitorIsHDR && driverSupportsAutoHDR &&
                      powerIsCharging && !mVpAutoHDRFailed;

  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString str(
        "useVpAutoHDR %d gfxVars %d contentIsHDR %d monitor %d driver %d "
        "charging %d failed %d",
        useVpAutoHDR, gfx::gfxVars::WebRenderOverlayVpAutoHDR(), contentIsHDR,
        monitorIsHDR, driverSupportsAutoHDR, powerIsCharging, mVpAutoHDRFailed);
    PROFILER_MARKER_TEXT("DCSurfaceVideo", GRAPHICS, {}, str);
  }

  if (!mVideoSwapChain || mSwapChainSize != swapChainSize || mIsDRM != isDRM ||
      mUseVpAutoHDR != useVpAutoHDR) {
    needsToPresent = true;
    ReleaseDecodeSwapChainResources();
    // Update mSwapChainSize before creating SwapChain
    mSwapChainSize = swapChainSize;
    mIsDRM = isDRM;

    auto swapChainFormat = GetSwapChainFormat(useVpAutoHDR);
    bool useYUVSwapChain = IsYUVSwapChainFormat(swapChainFormat);
    if (useYUVSwapChain) {
      // Tries to create YUV SwapChain
      CreateVideoSwapChain(swapChainFormat);
      if (!mVideoSwapChain) {
        mFailedYuvSwapChain = true;
        ReleaseDecodeSwapChainResources();

        gfxCriticalNote << "Fallback to RGB SwapChain";
      }
    }
    // Tries to create RGB SwapChain
    if (!mVideoSwapChain) {
      CreateVideoSwapChain(swapChainFormat);
    }
    if (!mVideoSwapChain && useVpAutoHDR) {
      mVpAutoHDRFailed = true;
      gfxCriticalNoteOnce << "Failed to create video SwapChain for VpAutoHDR";

      // Disable VpAutoHDR
      useVpAutoHDR = false;
      swapChainFormat = GetSwapChainFormat(useVpAutoHDR);
      CreateVideoSwapChain(swapChainFormat);
    }
  }

  aTransform = transform;
  mUseVpAutoHDR = useVpAutoHDR;

  return needsToPresent;
}

void DCSurfaceVideo::PresentVideo() {
  if (!mRenderTextureHost) {
    return;
  }

  if (!mVideoSwapChain) {
    gfxCriticalNote << "Failed to create VideoSwapChain";
    RenderThread::Get()->NotifyWebRenderError(
        wr::WebRenderError::VIDEO_OVERLAY);
    return;
  }

  mVisual->SetContent(mVideoSwapChain);

  if (!CallVideoProcessorBlt()) {
    bool useYUVSwapChain = IsYUVSwapChainFormat(mSwapChainFormat);
    if (useYUVSwapChain) {
      mFailedYuvSwapChain = true;
      ReleaseDecodeSwapChainResources();
      return;
    }
    RenderThread::Get()->NotifyWebRenderError(
        wr::WebRenderError::VIDEO_OVERLAY);
    return;
  }

  const auto device = mDCLayerTree->GetDevice();
  HRESULT hr;
  if (mFirstPresent) {
    mFirstPresent = false;
    UINT flags = DXGI_PRESENT_USE_DURATION;
    // DirectComposition can display black for a swap chain between the first
    // and second time it's presented to - maybe the first Present can get lost
    // somehow and it shows the wrong buffer. In that case copy the buffers so
    // all have the correct contents, which seems to help. The first Present()
    // after this needs to have SyncInterval > 0, or else the workaround doesn't
    // help.
    for (size_t i = 0; i < mSwapChainBufferCount - 1; ++i) {
      hr = mVideoSwapChain->Present(0, flags);
      // Ignore DXGI_STATUS_OCCLUDED since that's not an error but only
      // indicates that the window is occluded and we can stop rendering.
      if (FAILED(hr) && hr != DXGI_STATUS_OCCLUDED) {
        gfxCriticalNoteOnce << "video Present failed during first present: "
                            << gfx::hexa(hr);
        return;
      }

      RefPtr<ID3D11Texture2D> destTexture;
      mVideoSwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D),
                                 (void**)getter_AddRefs(destTexture));
      MOZ_ASSERT(destTexture);
      RefPtr<ID3D11Texture2D> srcTexture;
      hr = mVideoSwapChain->GetBuffer(1, __uuidof(ID3D11Texture2D),
                                      (void**)getter_AddRefs(srcTexture));
      MOZ_ASSERT(srcTexture);
      RefPtr<ID3D11DeviceContext> context;
      device->GetImmediateContext(getter_AddRefs(context));
      MOZ_ASSERT(context);
      context->CopyResource(destTexture, srcTexture);
    }

    // Additionally wait for the GPU to finish executing its commands, or
    // there still may be a black flicker when presenting expensive content
    // (e.g. 4k video).

    RefPtr<IDXGIDevice2> dxgiDevice2;
    device->QueryInterface((IDXGIDevice2**)getter_AddRefs(dxgiDevice2));
    MOZ_ASSERT(dxgiDevice2);

    HANDLE event = ::CreateEvent(nullptr, falsefalse, nullptr);
    hr = dxgiDevice2->EnqueueSetEvent(event);
    if (SUCCEEDED(hr)) {
      DebugOnly<DWORD> result = ::WaitForSingleObject(event, INFINITE);
      MOZ_ASSERT(result == WAIT_OBJECT_0);
    } else {
      gfxCriticalNoteOnce << "EnqueueSetEvent failed: " << gfx::hexa(hr);
    }
    ::CloseHandle(event);
  }

  UINT flags = DXGI_PRESENT_USE_DURATION;
  UINT interval = 1;
  if (StaticPrefs::gfx_webrender_dcomp_video_swap_chain_present_interval_0()) {
    interval = 0;
  }

  auto start = TimeStamp::Now();
  hr = mVideoSwapChain->Present(interval, flags);
  auto end = TimeStamp::Now();

  if (FAILED(hr) && hr != DXGI_STATUS_OCCLUDED) {
    gfxCriticalNoteOnce << "video Present failed: " << gfx::hexa(hr);
  }

  mPrevTexture = mRenderTextureHost;

  // Disable video overlay if mVideoSwapChain->Present() is too slow. It drops
  // fps.

  if (!StaticPrefs::gfx_webrender_dcomp_video_check_slow_present()) {
    return;
  }

  const auto maxPresentWaitDurationMs = 2;
  const auto maxSlowPresentCount = 5;
  const auto presentDurationMs =
      static_cast<uint32_t>((end - start).ToMilliseconds());
  const auto overlayType = mRenderTextureHost->IsSoftwareDecodedVideo()
                               ? DCompOverlayTypes::SOFTWARE_DECODED_VIDEO
                               : DCompOverlayTypes::HARDWARE_DECODED_VIDEO;

  nsPrintfCString marker("PresentWait overlay %u %ums ", (uint8_t)overlayType,
                         presentDurationMs);
  PROFILER_MARKER_TEXT("PresentWait", GRAPHICS, {}, marker);

  if (presentDurationMs > maxPresentWaitDurationMs) {
    mSlowPresentCount++;
  } else {
    mSlowPresentCount = 0;
  }

  if (mSlowPresentCount <= maxSlowPresentCount) {
    return;
  }

  DisableVideoOverlay();

  if (overlayType == DCompOverlayTypes::SOFTWARE_DECODED_VIDEO) {
    gfxCriticalNoteOnce << "Sw video swapchain present is slow";

    nsPrintfCString marker("Sw video swapchain present is slow");
    PROFILER_MARKER_TEXT("DisableOverlay", GRAPHICS, {}, marker);
  } else {
    gfxCriticalNoteOnce << "Hw video swapchain present is slow";

    nsPrintfCString marker("Hw video swapchain present is slow");
    PROFILER_MARKER_TEXT("DisableOverlay", GRAPHICS, {}, marker);
  }
}

void DCSurfaceVideo::DisableVideoOverlay() {
  if (!mRenderTextureHostUsageInfo) {
    return;
  }
  mRenderTextureHostUsageInfo->DisableVideoOverlay();
}

DXGI_FORMAT DCSurfaceVideo::GetSwapChainFormat(bool aUseVpAutoHDR) {
  if (aUseVpAutoHDR) {
    return DXGI_FORMAT_R16G16B16A16_FLOAT;
  }
  if (mFailedYuvSwapChain || !mDCLayerTree->SupportsHardwareOverlays()) {
    return DXGI_FORMAT_B8G8R8A8_UNORM;
  }
  return mDCLayerTree->GetOverlayFormatForSDR();
}

// Creates the video swap chain in the given format, bound to a freshly
// created DComp surface handle. Returns false (with logging) when either
// the handle or the swap chain cannot be created; on success records the
// format in mSwapChainFormat and resets the first-present workaround.
bool DCSurfaceVideo::CreateVideoSwapChain(DXGI_FORMAT aSwapChainFormat) {
  MOZ_ASSERT(mRenderTextureHost);

  // A new swap chain needs the first-present black-flash workaround again.
  mFirstPresent = true;

  const auto device = mDCLayerTree->GetDevice();

  RefPtr<IDXGIDevice> dxgiDevice;
  device->QueryInterface((IDXGIDevice**)getter_AddRefs(dxgiDevice));

  // Walk device -> adapter -> media factory, which exposes
  // CreateSwapChainForCompositionSurfaceHandle.
  RefPtr<IDXGIFactoryMedia> dxgiFactoryMedia;
  {
    RefPtr<IDXGIAdapter> adapter;
    dxgiDevice->GetAdapter(getter_AddRefs(adapter));
    adapter->GetParent(
        IID_PPV_ARGS((IDXGIFactoryMedia**)getter_AddRefs(dxgiFactoryMedia)));
  }

  mSwapChainSurfaceHandle = gfx::DeviceManagerDx::CreateDCompSurfaceHandle();
  if (!mSwapChainSurfaceHandle) {
    gfxCriticalNote << "Failed to create DCompSurfaceHandle";
    return false;
  }

  DXGI_SWAP_CHAIN_DESC1 desc = {};
  desc.Width = mSwapChainSize.width;
  desc.Height = mSwapChainSize.height;
  desc.Format = aSwapChainFormat;
  desc.Stereo = FALSE;
  desc.SampleDesc.Count = 1;
  desc.BufferCount = mSwapChainBufferCount;
  desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
  desc.Scaling = DXGI_SCALING_STRETCH;
  desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
  desc.Flags = DXGI_SWAP_CHAIN_FLAG_FULLSCREEN_VIDEO;
  if (IsYUVSwapChainFormat(aSwapChainFormat)) {
    desc.Flags |= DXGI_SWAP_CHAIN_FLAG_YUV_VIDEO;
  }
  if (mIsDRM) {
    // Protected content must only be shown on the display.
    desc.Flags |= DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
  }
  desc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;

  HRESULT hr;
  hr = dxgiFactoryMedia->CreateSwapChainForCompositionSurfaceHandle(
      device, mSwapChainSurfaceHandle, &desc, nullptr,
      getter_AddRefs(mVideoSwapChain));

  if (FAILED(hr)) {
    gfxCriticalNote << "Failed to create video SwapChain: " << gfx::hexa(hr)
                    << " " << mSwapChainSize;
    return false;
  }

  mSwapChainFormat = aSwapChainFormat;
  return true;
}

// TODO: Replace with YUVRangedColorSpace
static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace(
    const gfx::YUVColorSpace aYUVColorSpace,
    const gfx::ColorRange aColorRange) {
  if (aYUVColorSpace == gfx::YUVColorSpace::BT601) {
    if (aColorRange == gfx::ColorRange::FULL) {
      return Some(DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P601);
    } else {
      return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P601);
    }
  } else if (aYUVColorSpace == gfx::YUVColorSpace::BT709) {
    if (aColorRange == gfx::ColorRange::FULL) {
      return Some(DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P709);
    } else {
      return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P709);
    }
  } else if (aYUVColorSpace == gfx::YUVColorSpace::BT2020) {
    if (aColorRange == gfx::ColorRange::FULL) {
      // XXX Add SMPTEST2084 handling. HDR content is not handled yet by
      // video overlay.
      return Some(DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P2020);
    } else {
      return Some(DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P2020);
    }
  }

  return Nothing();
}

static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace(
    const gfx::YUVRangedColorSpace aYUVColorSpace) {
  const auto info = FromYUVRangedColorSpace(aYUVColorSpace);
  return GetSourceDXGIColorSpace(info.space, info.range);
}

bool DCSurfaceVideo::CallVideoProcessorBlt() {
  MOZ_ASSERT(mRenderTextureHost);

  HRESULT hr;
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=90 H=97 G=93

¤ Diese beiden folgenden Angebotsgruppen bietet das Unternehmen: Angebot – Wie Sie bei der Firma Beratungs- und Dienstleistungen beauftragen können ¤

*Eine klare Vorstellung vom Zielzustand






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.