Prevent extra copy in RasterDecoder::CopySubTextureInternalSKIA
Currently, when using OneCopyRasterBufferProvider, we perform an
upload from CPU shared memory to the GPU and then a GPU->GPU copy
during raster playback. This CL allows uploading CPU shared memory
directly to the destination shared image texture via a new hint in
RasterDecoder::CopySubTexture indicating that the source is
CPU-backed. This also lets us unify the copy implementation in
RasterDecoder to always use Skia.
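
For context, a minimal standalone sketch of the fast path this CL
adds, using only public Skia APIs (UploadCpuPixelsDirectly is a
hypothetical helper name, not part of the CL): once the source pixels
are available as a CPU-side SkPixmap, a single
SkSurface::writePixels() call uploads them straight into the
destination surface, with no intermediate source texture.

    // Sketch only, not the CL itself: pixels already mapped in CPU
    // memory are written directly into the destination surface. For
    // a GPU-backed SkSurface this is a single CPU->GPU upload.
    #include "include/core/SkPixmap.h"
    #include "include/core/SkSurface.h"

    void UploadCpuPixelsDirectly(SkSurface* dest,
                                 const SkPixmap& cpu_pixels,
                                 int xoffset,
                                 int yoffset) {
      // Mirrors the writePixels() call in
      // TryCopySubTextureINTERNALMemory below.
      dest->writePixels(cpu_pixels, xoffset, yoffset);
    }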
Bug: 984045
Change-Id: I6ac7d238cf3b2f43562481e2e1b36b3c4670a6f5
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2517149
Commit-Queue: Nathan Zabriskie <nazabris@microsoft.com>
Reviewed-by: Vasiliy Telezhnikov <vasilyt@chromium.org>
Reviewed-by: Sunny Sachanandani <sunnyps@chromium.org>
Cr-Commit-Position: refs/heads/master@{#841147}
diff --git a/gpu/command_buffer/service/raster_decoder.cc b/gpu/command_buffer/service/raster_decoder.cc
index 80b68fa9..526399e 100644
--- a/gpu/command_buffer/service/raster_decoder.cc
+++ b/gpu/command_buffer/service/raster_decoder.cc
@@ -572,6 +572,20 @@
GLboolean unpack_flip_y,
const Mailbox& source_mailbox,
const Mailbox& dest_mailbox);
+ bool TryCopySubTextureINTERNALMemory(
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ gfx::Rect dest_cleared_rect,
+ GLboolean unpack_flip_y,
+ const Mailbox& source_mailbox,
+ SharedImageRepresentationSkia* dest_shared_image,
+ SharedImageRepresentationSkia::ScopedWriteAccess* dest_scoped_access,
+ const std::vector<GrBackendSemaphore>& begin_semaphores,
+ std::vector<GrBackendSemaphore>& end_semaphores);
void DoWritePixelsINTERNAL(GLint x_offset,
GLint y_offset,
GLuint src_width,
@@ -2258,24 +2272,13 @@
const Mailbox& dest_mailbox) {
DCHECK(source_mailbox != dest_mailbox);
- // Use Skia to copy texture if raster's gr_context() is not using GL.
- auto source_shared_image = shared_image_representation_factory_.ProduceSkia(
- source_mailbox, shared_context_state_);
auto dest_shared_image = shared_image_representation_factory_.ProduceSkia(
dest_mailbox, shared_context_state_);
- if (!source_shared_image || !dest_shared_image) {
+ if (!dest_shared_image) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "unknown mailbox");
return;
}
- gfx::Size source_size = source_shared_image->size();
- gfx::Rect source_rect(x, y, width, height);
- if (!gfx::Rect(source_size).Contains(source_rect)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "source texture bad dimensions.");
- return;
- }
-
gfx::Size dest_size = dest_shared_image->size();
gfx::Rect dest_rect(xoffset, yoffset, width, height);
if (!gfx::Rect(dest_size).Contains(dest_rect)) {
@@ -2312,12 +2315,31 @@
return;
}
- // With OneCopyRasterBufferProvider, source_shared_image->BeginReadAccess()
- // will copy pixels from SHM GMB to the texture in |source_shared_image|,
- // and then use drawImageRect() to draw that texure to the target
- // |dest_shared_image|. We can save one copy by drawing the SHM GMB to the
- // target |dest_shared_image| directly.
- // TODO(penghuang): get rid of the one extra copy. https://crbug.com/984045
+  // Attempt to upload directly from CPU shared memory to the destination
+  // texture.
+ if (TryCopySubTextureINTERNALMemory(
+ xoffset, yoffset, x, y, width, height, new_cleared_rect,
+ unpack_flip_y, source_mailbox, dest_shared_image.get(),
+ dest_scoped_access.get(), begin_semaphores, end_semaphores)) {
+ return;
+ }
+
+  // Fall back to a GPU->GPU copy if the source image is not CPU-backed.
+ auto source_shared_image = shared_image_representation_factory_.ProduceSkia(
+ source_mailbox, shared_context_state_);
+ if (!source_shared_image) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "unknown source image mailbox.");
+ return;
+ }
+
+ gfx::Size source_size = source_shared_image->size();
+ gfx::Rect source_rect(x, y, width, height);
+ if (!gfx::Rect(source_size).Contains(source_rect)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "source texture bad dimensions.");
+ return;
+ }
+
std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
source_scoped_access = source_shared_image->BeginScopedReadAccess(
&begin_semaphores, &end_semaphores);
@@ -2358,6 +2380,59 @@
}
}
+bool RasterDecoderImpl::TryCopySubTextureINTERNALMemory(
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ gfx::Rect dest_cleared_rect,
+ GLboolean unpack_flip_y,
+ const Mailbox& source_mailbox,
+ SharedImageRepresentationSkia* dest_shared_image,
+ SharedImageRepresentationSkia::ScopedWriteAccess* dest_scoped_access,
+ const std::vector<GrBackendSemaphore>& begin_semaphores,
+ std::vector<GrBackendSemaphore>& end_semaphores) {
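+  // The fast path only handles unflipped copies that read from the source
+  // origin.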
+ if (unpack_flip_y || x != 0 || y != 0)
+ return false;
+
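+  // ProduceMemory() only succeeds for shared-memory (CPU) backed images;
+  // failure here routes us to the GPU->GPU fallback.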
+ auto source_shared_image =
+ shared_image_representation_factory_.ProduceMemory(source_mailbox);
+ if (!source_shared_image)
+ return false;
+
+ gfx::Size source_size = source_shared_image->size();
+ gfx::Rect source_rect(x, y, width, height);
+ if (!gfx::Rect(source_size).Contains(source_rect))
+ return false;
+
+ auto scoped_read_access = source_shared_image->BeginScopedReadAccess();
+ if (!scoped_read_access)
+ return false;
+
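+  // The mapped pixmap must cover the requested source rect exactly.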
+ SkPixmap pm = scoped_read_access->pixmap();
+ if (pm.width() != source_rect.width() || pm.height() != source_rect.height())
+ return false;
+
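+  // Make the destination surface wait on any pending begin semaphores
+  // before writing.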
+ if (!begin_semaphores.empty()) {
+ bool result = dest_scoped_access->surface()->wait(
+ begin_semaphores.size(), begin_semaphores.data(),
+ /*deleteSemaphoresAfterWait=*/false);
+ DCHECK(result);
+ }
+
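+  // One direct CPU->GPU upload into the destination surface.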
+ dest_scoped_access->surface()->writePixels(pm, xoffset, yoffset);
+
+ FlushAndSubmitIfNecessary(dest_scoped_access->surface(),
+ std::move(end_semaphores));
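+  // The write succeeded, so record the destination's newly cleared region.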
+ if (!dest_shared_image->IsCleared()) {
+ dest_shared_image->SetClearedRect(dest_cleared_rect);
+ }
+
+ return true;
+}
+
void RasterDecoderImpl::DoWritePixelsINTERNAL(GLint x_offset,
GLint y_offset,
GLuint src_width,