
Migrate content/common/gpu/media code to media/gpu

This is part of a gpu & media refactor to enable proper layering for Mus. See bug for details.

BUG=586386

Review-Url: https://codereview.chromium.org/1882373004
Cr-Commit-Position: refs/heads/master@{#390896}
diff --git a/BUILD.gn b/BUILD.gn
index b4d3ae9d..96fa27f2 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -95,9 +95,9 @@
       "//breakpad:minidump_stackwalk($host_toolchain)",
       "//chrome",
       "//chrome/test/chromedriver",
-      "//content/test:video_decode_accelerator_unittest",
-      "//content/test:video_encode_accelerator_unittest",
       "//media:media_unittests",
+      "//media/gpu:video_decode_accelerator_unittest",
+      "//media/gpu:video_encode_accelerator_unittest",
       "//ppapi/examples/video_decode",
       "//sandbox/linux:chrome_sandbox",
       "//sandbox/linux:sandbox_linux_unittests",
@@ -312,7 +312,7 @@
       "//components/policy/android:components_policy_junit_tests",
       "//content/public/android:content_junit_tests",
       "//content/shell/android:content_shell_apk",
-      "//content/test:video_decode_accelerator_unittest",
+      "//media/gpu:video_decode_accelerator_unittest",
       "//net/android:net_junit_tests",
       "//testing/android/junit:junit_unit_tests",
       "//third_party/android_tools:uiautomator_java",
diff --git a/build/all.gyp b/build/all.gyp
index 6f71d1d3..bfc96d5 100644
--- a/build/all.gyp
+++ b/build/all.gyp
@@ -323,7 +323,7 @@
         ['OS!="ios"', {
           'dependencies': [
             '../ui/gl/gl_tests.gyp:gl_unittests',
-	    '../url/ipc/url_ipc.gyp:url_ipc_unittests',
+            '../url/ipc/url_ipc.gyp:url_ipc_unittests',
           ],
         }],
         ['OS!="ios" and OS!="mac"', {
@@ -604,7 +604,7 @@
             '../gpu/gpu.gyp:gl_tests',
             '../gpu/gpu.gyp:angle_unittests',
             '../gpu/gpu.gyp:gpu_unittests',
-	    '../gpu/gpu.gyp:command_buffer_gles2_tests',
+            '../gpu/gpu.gyp:command_buffer_gles2_tests',
             '../third_party/catapult/telemetry/telemetry.gyp:*',
           ],
           'conditions': [
@@ -631,7 +631,7 @@
             '../gpu/gpu.gyp:gl_tests',
             '../gpu/gpu.gyp:angle_unittests',
             '../gpu/gpu.gyp:gpu_unittests',
-	    '../gpu/gpu.gyp:command_buffer_gles2_tests',
+            '../gpu/gpu.gyp:command_buffer_gles2_tests',
             '../third_party/catapult/telemetry/telemetry.gyp:*',
           ],
           'conditions': [
@@ -816,12 +816,12 @@
             '../components/components_tests.gyp:components_unittests_apk',
             '../content/content_shell_and_tests.gyp:content_browsertests_apk',
             '../content/content_shell_and_tests.gyp:content_unittests_apk',
-            '../content/content_shell_and_tests.gyp:video_decode_accelerator_unittest_apk',
             '../gpu/gpu.gyp:command_buffer_gles2_tests_apk',
             '../gpu/gpu.gyp:gl_tests_apk',
             '../gpu/gpu.gyp:gpu_unittests_apk',
             '../ipc/ipc.gyp:ipc_tests_apk',
             '../media/media.gyp:media_unittests_apk',
+            '../media/media.gyp:video_decode_accelerator_unittest_apk',
             '../media/midi/midi.gyp:midi_unittests_apk',
             '../net/net.gyp:net_unittests_apk',
             '../skia/skia_tests.gyp:skia_unittests_apk',
@@ -1192,14 +1192,14 @@
             '../breakpad/breakpad.gyp:minidump_stackwalk',
             '../chrome/chrome.gyp:chrome',
             '../chrome/chrome.gyp:chromedriver',
-            '../content/content_shell_and_tests.gyp:video_decode_accelerator_unittest',
-            '../content/content_shell_and_tests.gyp:video_encode_accelerator_unittest',
             '../media/media.gyp:media_unittests',
+            '../media/media.gyp:video_decode_accelerator_unittest',
+            '../media/media.gyp:video_encode_accelerator_unittest',
             '../ppapi/ppapi_internal.gyp:ppapi_example_video_decode',
             '../sandbox/sandbox.gyp:chrome_sandbox',
             '../sandbox/sandbox.gyp:sandbox_linux_unittests',
-            '../third_party/mesa/mesa.gyp:osmesa',
             '../third_party/catapult/telemetry/telemetry.gyp:bitmaptools#host',
+            '../third_party/mesa/mesa.gyp:osmesa',
             '../tools/perf/clear_system_cache/clear_system_cache.gyp:clear_system_cache',
           ],
           'conditions': [
diff --git a/build/gn_migration.gypi b/build/gn_migration.gypi
index 8fa60324..3f3358d 100644
--- a/build/gn_migration.gypi
+++ b/build/gn_migration.gypi
@@ -230,7 +230,6 @@
             '../content/content_shell_and_tests.gyp:content_shell_apk',
             '../content/content_shell_and_tests.gyp:content_shell_test_apk',
             '../content/content_shell_and_tests.gyp:content_unittests_apk',
-            '../content/content_shell_and_tests.gyp:video_decode_accelerator_unittest_apk',
             '../device/device_tests.gyp:device_unittests_apk',
             '../gpu/gpu.gyp:command_buffer_gles2_tests_apk',
             '../gpu/gpu.gyp:gl_tests_apk',
@@ -240,6 +239,7 @@
             '../media/cast/cast.gyp:cast_unittests_apk',
             '../media/media.gyp:media_perftests_apk',
             '../media/media.gyp:media_unittests_apk',
+            '../media/media.gyp:video_decode_accelerator_unittest_apk',
             '../media/midi/midi.gyp:midi_unittests_apk',
             '../net/net.gyp:net_junit_tests',
             '../net/net.gyp:net_unittests_apk',
@@ -682,22 +682,22 @@
         }],
         ['chromeos==1', {
           'dependencies': [
-            '../content/content_shell_and_tests.gyp:jpeg_decode_accelerator_unittest',
+            '../media/media.gyp:jpeg_decode_accelerator_unittest',
           ],
         }],
         ['chromeos==1 or OS=="mac"', {
           'dependencies': [
-            '../content/content_shell_and_tests.gyp:video_encode_accelerator_unittest',
+            '../media/media.gyp:video_encode_accelerator_unittest',
           ],
         }],
         ['chromeos==1 and target_arch != "arm"', {
           'dependencies': [
-            '../content/content_shell_and_tests.gyp:vaapi_jpeg_decoder_unittest',
+            '../media/media.gyp:vaapi_jpeg_decoder_unittest',
           ],
         }],
         ['chromeos==1 or OS=="win" or OS=="android"', {
           'dependencies': [
-            '../content/content_shell_and_tests.gyp:video_decode_accelerator_unittest',
+            '../media/media.gyp:video_decode_accelerator_unittest',
           ],
         }],
         ['OS=="linux" or OS=="win"', {
diff --git a/content/DEPS b/content/DEPS
index 1606072..3c3a18dc 100644
--- a/content/DEPS
+++ b/content/DEPS
@@ -27,6 +27,7 @@
 
   "+dbus",
   "+gpu",
+  "+media",
   "+mojo/common",
   "+mojo/edk/embedder",
   "+mojo/edk/js",
@@ -118,4 +119,3 @@
     "+content/shell/common",
   ],
 }
-
diff --git a/content/common/BUILD.gn b/content/common/BUILD.gn
index e37feaf..8329118 100644
--- a/content/common/BUILD.gn
+++ b/content/common/BUILD.gn
@@ -11,120 +11,6 @@
   import("//build/config/mac/mac_sdk.gni")
 }
 
-if (is_chromeos && current_cpu != "arm") {
-  action("libva_generate_stubs") {
-    extra_header = "gpu/media/va_stub_header.fragment"
-
-    script = "../../tools/generate_stubs/generate_stubs.py"
-    sources = [
-      "gpu/media/va.sigs",
-    ]
-    inputs = [
-      extra_header,
-    ]
-    if (use_x11) {
-      sources += [ "gpu/media/va_x11.sigs" ]
-    }
-    if (use_ozone) {
-      sources += [ "gpu/media/va_drm.sigs" ]
-    }
-    stubs_filename_root = "va_stubs"
-
-    outputs = [
-      "$target_gen_dir/gpu/media/$stubs_filename_root.cc",
-      "$target_gen_dir/gpu/media/$stubs_filename_root.h",
-    ]
-    args = [
-      "-i",
-      rebase_path("$target_gen_dir/gpu/media", root_build_dir),
-      "-o",
-      rebase_path("$target_gen_dir/gpu/media", root_build_dir),
-      "-t",
-      "posix_stubs",
-      "-e",
-      rebase_path(extra_header, root_build_dir),
-      "-s",
-      stubs_filename_root,
-      "-p",
-      "content/common/gpu/media",
-    ]
-
-    args += rebase_path(sources, root_build_dir)
-  }
-}
-
-if (is_chromeos && use_v4lplugin) {
-  action("libv4l2_generate_stubs") {
-    extra_header = "gpu/media/v4l2_stub_header.fragment"
-
-    script = "../../tools/generate_stubs/generate_stubs.py"
-    sources = [
-      "gpu/media/v4l2.sig",
-    ]
-    inputs = [
-      extra_header,
-    ]
-    stubs_filename_root = "v4l2_stubs"
-
-    outputs = [
-      "$target_gen_dir/gpu/media/$stubs_filename_root.cc",
-      "$target_gen_dir/gpu/media/$stubs_filename_root.h",
-    ]
-    args = [
-      "-i",
-      rebase_path("$target_gen_dir/gpu/media", root_build_dir),
-      "-o",
-      rebase_path("$target_gen_dir/gpu/media", root_build_dir),
-      "-t",
-      "posix_stubs",
-      "-e",
-      rebase_path(extra_header, root_build_dir),
-      "-s",
-      stubs_filename_root,
-      "-p",
-      "content/common/gpu/media",
-    ]
-
-    args += rebase_path(sources, root_build_dir)
-  }
-}
-
-if (is_mac) {
-  action("libvt_generate_stubs") {
-    extra_header = "gpu/media/vt_stubs_header.fragment"
-
-    script = "../../tools/generate_stubs/generate_stubs.py"
-    sources = [
-      "gpu/media/vt.sig",
-    ]
-    inputs = [
-      extra_header,
-    ]
-    stubs_filename_root = "vt_stubs"
-
-    outputs = [
-      "$target_gen_dir/gpu/media/$stubs_filename_root.cc",
-      "$target_gen_dir/gpu/media/$stubs_filename_root.h",
-    ]
-    args = [
-      "-i",
-      rebase_path("$target_gen_dir/gpu/media", root_build_dir),
-      "-o",
-      rebase_path("$target_gen_dir/gpu/media", root_build_dir),
-      "-t",
-      "posix_stubs",
-      "-e",
-      rebase_path(extra_header, root_build_dir),
-      "-s",
-      stubs_filename_root,
-      "-p",
-      "content/common/gpu/media",
-    ]
-
-    args += rebase_path(sources, root_build_dir)
-  }
-}
-
 source_set("common") {
   # Targets external to content should always link to the public API.
   # In addition, targets outside of the content component (shell and tests)
@@ -167,10 +53,6 @@
     "//gpu/command_buffer/service",
     "//gpu/ipc/client",
     "//gpu/ipc/common",
-
-    # TODO(markdittmer): This should be removed once content/common/gpu/media
-    # is refactored into media/ipc.
-    "//gpu/ipc/service",
     "//gpu/skia_bindings",
     "//ipc",
     "//ipc/mojo",
@@ -224,33 +106,7 @@
   }
 
   if (is_mac) {
-    sources += [
-                 "gpu/media/vt_mac.h",
-                 "gpu/media/vt_video_decode_accelerator_mac.cc",
-                 "gpu/media/vt_video_decode_accelerator_mac.h",
-                 "gpu/media/vt_video_encode_accelerator_mac.cc",
-                 "gpu/media/vt_video_encode_accelerator_mac.h",
-               ] + get_target_outputs(":libvt_generate_stubs")
-
-    deps += [
-      ":libvt_generate_stubs",
-      "//content:resources",
-      "//content/app/resources",
-      "//sandbox/mac:seatbelt",
-      "//third_party/WebKit/public:image_resources",
-      "//third_party/WebKit/public:resources",
-      "//ui/accelerated_widget_mac",
-    ]
-    lib_dirs = [ "$mac_sdk_path/usr/lib" ]
-    libs += [
-      "AVFoundation.framework",
-      "CoreMedia.framework",
-      "CoreVideo.framework",
-      "IOSurface.framework",
-      "OpenGL.framework",
-      "QuartzCore.framework",
-      "sandbox",
-    ]
+    deps += [ "//sandbox/mac:seatbelt" ]
   }
 
   if (is_android) {
@@ -313,155 +169,6 @@
     ]
   }
 
-  if (is_android) {
-    sources += [
-      "gpu/media/android_copying_backing_strategy.cc",
-      "gpu/media/android_copying_backing_strategy.h",
-      "gpu/media/android_deferred_rendering_backing_strategy.cc",
-      "gpu/media/android_deferred_rendering_backing_strategy.h",
-      "gpu/media/android_video_decode_accelerator.cc",
-      "gpu/media/android_video_decode_accelerator.h",
-      "gpu/media/avda_codec_image.cc",
-      "gpu/media/avda_codec_image.h",
-      "gpu/media/avda_return_on_failure.h",
-      "gpu/media/avda_shared_state.cc",
-      "gpu/media/avda_shared_state.h",
-      "gpu/media/avda_state_provider.h",
-      "gpu/media/avda_surface_tracker.cc",
-      "gpu/media/avda_surface_tracker.h",
-    ]
-
-    if (enable_webrtc) {
-      deps += [ "//third_party/libyuv" ]
-      sources += [
-        "gpu/media/android_video_encode_accelerator.cc",
-        "gpu/media/android_video_encode_accelerator.h",
-      ]
-    }
-
-    if (mojo_media_host == "gpu") {
-      deps += [ "//media/mojo/services:cdm_service" ]
-    }
-  }
-
-  if (is_chromeos) {
-    sources += [
-      "gpu/media/accelerated_video_decoder.h",
-      "gpu/media/h264_decoder.cc",
-      "gpu/media/h264_decoder.h",
-      "gpu/media/h264_dpb.cc",
-      "gpu/media/h264_dpb.h",
-      "gpu/media/vp8_decoder.cc",
-      "gpu/media/vp8_decoder.h",
-      "gpu/media/vp8_picture.cc",
-      "gpu/media/vp8_picture.h",
-      "gpu/media/vp9_decoder.cc",
-      "gpu/media/vp9_decoder.h",
-      "gpu/media/vp9_picture.cc",
-      "gpu/media/vp9_picture.h",
-    ]
-    if (use_v4lplugin) {
-      defines += [ "USE_LIBV4L2" ]
-      sources += get_target_outputs(":libv4l2_generate_stubs")
-      deps += [ ":libv4l2_generate_stubs" ]
-    }
-    if (use_v4l2_codec) {
-      defines += [ "USE_V4L2_CODEC" ]
-      deps += [ "//third_party/libyuv" ]
-      sources += [
-        "gpu/media/generic_v4l2_device.cc",
-        "gpu/media/generic_v4l2_device.h",
-        "gpu/media/v4l2_device.cc",
-        "gpu/media/v4l2_device.h",
-        "gpu/media/v4l2_image_processor.cc",
-        "gpu/media/v4l2_image_processor.h",
-        "gpu/media/v4l2_jpeg_decode_accelerator.cc",
-        "gpu/media/v4l2_jpeg_decode_accelerator.h",
-        "gpu/media/v4l2_slice_video_decode_accelerator.cc",
-        "gpu/media/v4l2_slice_video_decode_accelerator.h",
-        "gpu/media/v4l2_video_decode_accelerator.cc",
-        "gpu/media/v4l2_video_decode_accelerator.h",
-        "gpu/media/v4l2_video_encode_accelerator.cc",
-        "gpu/media/v4l2_video_encode_accelerator.h",
-      ]
-      libs = [
-        "EGL",
-        "GLESv2",
-      ]
-    }
-    if (current_cpu == "arm") {
-      sources += [
-        "gpu/media/tegra_v4l2_device.cc",
-        "gpu/media/tegra_v4l2_device.h",
-      ]
-    }
-    if (current_cpu != "arm") {
-      sources += [
-                   "gpu/media/va_surface.h",
-                   "gpu/media/vaapi_jpeg_decode_accelerator.cc",
-                   "gpu/media/vaapi_jpeg_decode_accelerator.h",
-                   "gpu/media/vaapi_jpeg_decoder.cc",
-                   "gpu/media/vaapi_jpeg_decoder.h",
-                   "gpu/media/vaapi_picture.cc",
-                   "gpu/media/vaapi_picture.h",
-                   "gpu/media/vaapi_video_decode_accelerator.cc",
-                   "gpu/media/vaapi_video_decode_accelerator.h",
-                   "gpu/media/vaapi_video_encode_accelerator.cc",
-                   "gpu/media/vaapi_video_encode_accelerator.h",
-                   "gpu/media/vaapi_wrapper.cc",
-                   "gpu/media/vaapi_wrapper.h",
-                 ] + get_target_outputs(":libva_generate_stubs")
-      configs += [
-        "//third_party/libva:libva_config",
-        "//third_party/libyuv:libyuv_config",
-      ]
-      deps += [
-        ":libva_generate_stubs",
-        "//media",
-        "//third_party/libyuv",
-      ]
-      if (use_x11) {
-        sources += [
-          "gpu/media/vaapi_tfp_picture.cc",
-          "gpu/media/vaapi_tfp_picture.h",
-        ]
-      }
-      if (use_ozone) {
-        sources += [
-          "gpu/media/vaapi_drm_picture.cc",
-          "gpu/media/vaapi_drm_picture.h",
-        ]
-      }
-    }
-  }
-
-  if (is_win) {
-    sources += [
-      "gpu/media/dxva_video_decode_accelerator_win.cc",
-      "gpu/media/dxva_video_decode_accelerator_win.h",
-    ]
-    configs += [ "//third_party/khronos:khronos_headers" ]
-    deps += [ "//ui/gl" ]
-    libs += [
-      "d3d9.lib",
-      "d3d11.lib",
-      "dxva2.lib",
-      "strmiids.lib",
-      "mf.lib",
-      "mfplat.lib",
-      "mfuuid.lib",
-    ]
-    ldflags += [
-      "/DELAYLOAD:d3d9.dll",
-      "/DELAYLOAD:d3d11.dll",
-      "/DELAYLOAD:dxva2.dll",
-      "/DELAYLOAD:mf.dll",
-      "/DELAYLOAD:mfplat.dll",
-    ]
-
-    # TODO(GYP): extract_xinput action.
-  }
-
   if (!is_win || !use_aura) {
     sources -= [ "cursors/webcursor_aurawin.cc" ]
   }
diff --git a/content/common/common.gni b/content/common/common.gni
index 597cd1b5..8a794fa 100644
--- a/content/common/common.gni
+++ b/content/common/common.gni
@@ -11,12 +11,3 @@
                 [ rebase_path("../content_common.gypi") ],
                 "scope",
                 [ "../content_common.gypi" ])
-
-declare_args() {
-  # Indicates if V4L plugin is used.
-  use_v4lplugin = false
-
-  # Indicates if Video4Linux2 codec is used. This is used for all CrOS
-  # platforms which have v4l2 hardware encoder / decoder.
-  use_v4l2_codec = false
-}
diff --git a/content/common/gpu/media/DEPS b/content/common/gpu/media/DEPS
deleted file mode 100644
index dac3d1ca..0000000
--- a/content/common/gpu/media/DEPS
+++ /dev/null
@@ -1,9 +0,0 @@
-include_rules = [
-  "+media",
-  "+third_party/libva",
-  "+third_party/libyuv",
-  "+ui/display/chromeos",
-  "+ui/display/types",
-  "+ui/platform_window",
-  "+third_party/v4l-utils",
-]
diff --git a/content/common/gpu/media/OWNERS b/content/common/gpu/media/OWNERS
deleted file mode 100644
index a6aaeeb..0000000
--- a/content/common/gpu/media/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-# Media gpu owners.
-file://media/gpu/OWNERS
diff --git a/content/common/gpu/media/vt_mac.h b/content/common/gpu/media/vt_mac.h
deleted file mode 100644
index fcb7d21..0000000
--- a/content/common/gpu/media/vt_mac.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_MEDIA_VT_H_
-#define CONTENT_COMMON_GPU_MEDIA_VT_H_
-
-// Dynamic library loader.
-#include "content/common/gpu/media/vt_stubs.h"
-
-// CoreMedia and VideoToolbox types.
-#include "content/common/gpu/media/vt_stubs_header.fragment"
-
-// CoreMedia and VideoToolbox functions.
-extern "C" {
-#include "content/common/gpu/media/vt.sig"
-}  // extern "C"
-
-#endif  // CONTENT_COMMON_GPU_MEDIA_VT_H_
diff --git a/content/common/sandbox_mac.mm b/content/common/sandbox_mac.mm
index c7a2cdd7..73fee33 100644
--- a/content/common/sandbox_mac.mm
+++ b/content/common/sandbox_mac.mm
@@ -32,10 +32,10 @@
 #include "base/strings/sys_string_conversions.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/sys_info.h"
-#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
 #include "content/grit/content_resources.h"
 #include "content/public/common/content_client.h"
 #include "content/public/common/content_switches.h"
+#include "media/gpu/vt_video_decode_accelerator_mac.h"
 #include "sandbox/mac/seatbelt.h"
 #include "third_party/icu/source/common/unicode/uchar.h"
 #include "ui/base/layout.h"
@@ -332,7 +332,7 @@
     gfx::GLSurface::InitializeOneOff();
 
     // Preload VideoToolbox.
-    InitializeVideoToolbox();
+    media::InitializeVideoToolbox();
   }
 
   if (sandbox_type == SANDBOX_TYPE_PPAPI) {
diff --git a/content/content_common.gypi b/content/content_common.gypi
index b46b450b..8609ee01 100644
--- a/content/content_common.gypi
+++ b/content/content_common.gypi
@@ -12,11 +12,6 @@
     '../gpu/gpu.gyp:command_buffer_service',
     '../gpu/gpu.gyp:gles2_c_lib',
     '../gpu/gpu.gyp:gles2_implementation',
-
-    # TODO(markdittmer): This should be removed once content/common/gpu/media
-    # is refactored into media/ipc.
-    '../gpu/gpu.gyp:gpu_ipc_service',
-
     '../gpu/gpu.gyp:gpu',
     '../gpu/skia_bindings/skia_bindings.gyp:gpu_skia_bindings',
     '../ipc/ipc.gyp:ipc',
@@ -92,8 +87,6 @@
     '../ui/accessibility/accessibility.gyp:ax_gen',
   ],
   'variables': {
-    'use_v4lplugin%': 0,
-    'use_v4l2_codec%': 0,
     'public_common_sources': [
       'public/common/appcache_info.h',
       'public/common/bindings_policy.h',
@@ -354,23 +347,6 @@
       'common/gpu/client/context_provider_command_buffer.h',
       'common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc',
       'common/gpu/client/webgraphicscontext3d_command_buffer_impl.h',
-      'common/gpu/media/fake_video_decode_accelerator.cc',
-      'common/gpu/media/fake_video_decode_accelerator.h',
-      'common/gpu/media/gpu_jpeg_decode_accelerator.cc',
-      'common/gpu/media/gpu_jpeg_decode_accelerator.h',
-      'common/gpu/media/gpu_video_decode_accelerator.cc',
-      'common/gpu/media/gpu_video_decode_accelerator.h',
-      'common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc',
-      'common/gpu/media/gpu_video_decode_accelerator_factory_impl.h',
-      'common/gpu/media/gpu_video_decode_accelerator_helpers.h',
-      'common/gpu/media/gpu_video_encode_accelerator.cc',
-      'common/gpu/media/gpu_video_encode_accelerator.h',
-      'common/gpu/media/media_channel.cc',
-      'common/gpu/media/media_channel.h',
-      'common/gpu/media/media_service.cc',
-      'common/gpu/media/media_service.h',
-      'common/gpu/media/shared_memory_region.cc',
-      'common/gpu/media/shared_memory_region.h',
       'common/gpu_host_messages.h',
       'common/gpu_process_launch_causes.h',
       'common/host_discardable_shared_memory_manager.cc',
@@ -595,67 +571,12 @@
   ],
   'conditions': [
     ['OS=="mac"', {
-      'dependencies': [
-        '../media/media.gyp:media',
-        '../sandbox/sandbox.gyp:seatbelt',
-        'app/resources/content_resources.gyp:content_resources',
-        '../ui/accelerated_widget_mac/accelerated_widget_mac.gyp:accelerated_widget_mac'
-      ],
-      'sources': [
-        'common/gpu/media/vt_mac.h',
-        'common/gpu/media/vt_video_decode_accelerator_mac.cc',
-        'common/gpu/media/vt_video_decode_accelerator_mac.h',
-        'common/gpu/media/vt_video_encode_accelerator_mac.cc',
-        'common/gpu/media/vt_video_encode_accelerator_mac.h',
-      ],
-      'link_settings': {
-        'libraries': [
-          '$(SDKROOT)/System/Library/Frameworks/AVFoundation.framework',
-          '$(SDKROOT)/System/Library/Frameworks/CoreMedia.framework',
-          '$(SDKROOT)/System/Library/Frameworks/CoreVideo.framework',
-          '$(SDKROOT)/System/Library/Frameworks/IOSurface.framework',
-          '$(SDKROOT)/System/Library/Frameworks/QuartzCore.framework',
-          '$(SDKROOT)/usr/lib/libsandbox.dylib',
-        ],
-      },
+      'dependencies': [ '../sandbox/sandbox.gyp:seatbelt' ],
       'variables': {
-        'generate_stubs_script': '../tools/generate_stubs/generate_stubs.py',
-        'extra_header': 'common/gpu/media/vt_stubs_header.fragment',
-        'sig_files': ['common/gpu/media/vt.sig'],
-        'outfile_type': 'posix_stubs',
-        'stubs_filename_root': 'vt_stubs',
-        'project_path': 'content/common/gpu/media',
-        'intermediate_dir': '<(INTERMEDIATE_DIR)',
-        'output_root': '<(SHARED_INTERMEDIATE_DIR)/vt_stubs',
+        'vt_stubs_output_root': '<(SHARED_INTERMEDIATE_DIR)/vt_stubs',
       },
       'include_dirs': [
-        '<(output_root)',
-      ],
-      'actions': [
-        {
-          'action_name': 'generate_stubs',
-          'inputs': [
-            '<(generate_stubs_script)',
-            '<(extra_header)',
-            '<@(sig_files)',
-          ],
-          'outputs': [
-            '<(intermediate_dir)/<(stubs_filename_root).cc',
-            '<(output_root)/<(project_path)/<(stubs_filename_root).h',
-          ],
-          'action': ['python',
-                     '<(generate_stubs_script)',
-                     '-i', '<(intermediate_dir)',
-                     '-o', '<(output_root)/<(project_path)',
-                     '-t', '<(outfile_type)',
-                     '-e', '<(extra_header)',
-                     '-s', '<(stubs_filename_root)',
-                     '-p', '<(project_path)',
-                     '<@(_inputs)',
-          ],
-          'process_outputs_as_sources': 1,
-          'message': 'Generating VideoToolbox stubs for dynamic loading',
-        },
+        '<(vt_stubs_output_root)',
       ],
     }],
     ['OS=="android"',{
@@ -704,36 +625,6 @@
         'public/common/pepper_plugin_info.h',
       ],
     }],
-    ['OS=="android"', {
-      'dependencies': [
-        '../media/media.gyp:media',
-      ],
-      'sources': [
-        'common/gpu/media/android_copying_backing_strategy.cc',
-        'common/gpu/media/android_copying_backing_strategy.h',
-        'common/gpu/media/android_deferred_rendering_backing_strategy.cc',
-        'common/gpu/media/android_deferred_rendering_backing_strategy.h',
-        'common/gpu/media/android_video_decode_accelerator.cc',
-        'common/gpu/media/android_video_decode_accelerator.h',
-        'common/gpu/media/avda_codec_image.cc',
-        'common/gpu/media/avda_codec_image.h',
-        'common/gpu/media/avda_return_on_failure.h',
-        'common/gpu/media/avda_shared_state.cc',
-        'common/gpu/media/avda_shared_state.h',
-        'common/gpu/media/avda_state_provider.h',
-        'common/gpu/media/avda_surface_tracker.h',
-        'common/gpu/media/avda_surface_tracker.cc',
-      ],
-    }],
-    ['OS=="android" and enable_webrtc==1', {
-      'dependencies': [
-        '../third_party/libyuv/libyuv.gyp:libyuv',
-      ],
-      'sources': [
-        'common/gpu/media/android_video_encode_accelerator.cc',
-        'common/gpu/media/android_video_encode_accelerator.h',
-      ],
-    }],
     ['enable_webrtc==1', {
       'dependencies': [
         '../third_party/libjingle/libjingle.gyp:libjingle',
@@ -743,227 +634,6 @@
         'public/common/webrtc_ip_handling_policy.h',
       ],
     }],
-    ['use_v4lplugin==1 and chromeos==1', {
-      'defines': [
-        'USE_LIBV4L2'
-      ],
-      'variables': {
-        'generate_stubs_script': '../tools/generate_stubs/generate_stubs.py',
-        'extra_header': 'common/gpu/media/v4l2_stub_header.fragment',
-        'sig_files': ['common/gpu/media/v4l2.sig'],
-        'outfile_type': 'posix_stubs',
-        'stubs_filename_root': 'v4l2_stubs',
-        'project_path': 'content/common/gpu/media',
-        'intermediate_dir': '<(INTERMEDIATE_DIR)',
-        'output_root': '<(SHARED_INTERMEDIATE_DIR)/v4l2',
-      },
-      'include_dirs': [
-        '<(output_root)',
-      ],
-      'actions': [
-        {
-          'action_name': 'generate_stubs',
-          'inputs': [
-            '<(generate_stubs_script)',
-            '<(extra_header)',
-            '<@(sig_files)',
-          ],
-          'outputs': [
-            '<(intermediate_dir)/<(stubs_filename_root).cc',
-            '<(output_root)/<(project_path)/<(stubs_filename_root).h',
-          ],
-          'action': ['python',
-            '<(generate_stubs_script)',
-            '-i', '<(intermediate_dir)',
-            '-o', '<(output_root)/<(project_path)',
-            '-t', '<(outfile_type)',
-            '-e', '<(extra_header)',
-            '-s', '<(stubs_filename_root)',
-            '-p', '<(project_path)',
-            '<@(_inputs)',
-          ],
-          'process_outputs_as_sources': 1,
-          'message': 'Generating libv4l2 stubs for dynamic loading',
-        },
-      ],
-    }],
-    ['chromeos==1', {
-      'sources': [
-        'common/gpu/media/accelerated_video_decoder.h',
-        'common/gpu/media/h264_decoder.cc',
-        'common/gpu/media/h264_decoder.h',
-        'common/gpu/media/h264_dpb.cc',
-        'common/gpu/media/h264_dpb.h',
-        'common/gpu/media/vp8_decoder.cc',
-        'common/gpu/media/vp8_decoder.h',
-        'common/gpu/media/vp8_picture.cc',
-        'common/gpu/media/vp8_picture.h',
-        'common/gpu/media/vp9_decoder.cc',
-        'common/gpu/media/vp9_decoder.h',
-        'common/gpu/media/vp9_picture.cc',
-        'common/gpu/media/vp9_picture.h',
-      ],
-    }],
-    ['chromeos==1 and use_v4l2_codec==1', {
-      'direct_dependent_settings': {
-        'defines': [
-          'USE_V4L2_CODEC'
-        ],
-      },
-      'defines': [
-        'USE_V4L2_CODEC'
-      ],
-      'dependencies': [
-        '../media/media.gyp:media',
-        '../third_party/libyuv/libyuv.gyp:libyuv',
-      ],
-      'sources': [
-        'common/gpu/media/generic_v4l2_device.cc',
-        'common/gpu/media/generic_v4l2_device.h',
-        'common/gpu/media/v4l2_device.cc',
-        'common/gpu/media/v4l2_device.h',
-        'common/gpu/media/v4l2_image_processor.cc',
-        'common/gpu/media/v4l2_image_processor.h',
-        'common/gpu/media/v4l2_jpeg_decode_accelerator.cc',
-        'common/gpu/media/v4l2_jpeg_decode_accelerator.h',
-        'common/gpu/media/v4l2_slice_video_decode_accelerator.cc',
-        'common/gpu/media/v4l2_slice_video_decode_accelerator.h',
-        'common/gpu/media/v4l2_video_decode_accelerator.cc',
-        'common/gpu/media/v4l2_video_decode_accelerator.h',
-        'common/gpu/media/v4l2_video_encode_accelerator.cc',
-        'common/gpu/media/v4l2_video_encode_accelerator.h',
-      ],
-      'include_dirs': [
-        '<(DEPTH)/third_party/khronos',
-      ],
-    }],
-    ['target_arch == "arm" and chromeos == 1', {
-      'sources': [
-        'common/gpu/media/tegra_v4l2_device.cc',
-        'common/gpu/media/tegra_v4l2_device.h',
-      ],
-    }],
-    ['target_arch != "arm" and chromeos == 1', {
-      'dependencies': [
-        '../media/media.gyp:media',
-        '../third_party/libyuv/libyuv.gyp:libyuv',
-      ],
-      'sources': [
-        'common/gpu/media/va_surface.h',
-        'common/gpu/media/vaapi_jpeg_decode_accelerator.cc',
-        'common/gpu/media/vaapi_jpeg_decode_accelerator.h',
-        'common/gpu/media/vaapi_jpeg_decoder.cc',
-        'common/gpu/media/vaapi_jpeg_decoder.h',
-        'common/gpu/media/vaapi_picture.cc',
-        'common/gpu/media/vaapi_picture.h',
-        'common/gpu/media/vaapi_video_decode_accelerator.cc',
-        'common/gpu/media/vaapi_video_decode_accelerator.h',
-        'common/gpu/media/vaapi_video_encode_accelerator.cc',
-        'common/gpu/media/vaapi_video_encode_accelerator.h',
-        'common/gpu/media/vaapi_wrapper.cc',
-        'common/gpu/media/vaapi_wrapper.h',
-      ],
-      'conditions': [
-        ['use_x11 == 1', {
-          'variables': {
-            'sig_files': [
-              'common/gpu/media/va.sigs',
-              'common/gpu/media/va_x11.sigs',
-            ],
-          },
-          'sources': [
-            'common/gpu/media/vaapi_tfp_picture.cc',
-            'common/gpu/media/vaapi_tfp_picture.h',
-          ],
-        }, {
-          'variables': {
-            'sig_files': [
-              'common/gpu/media/va.sigs',
-              'common/gpu/media/va_drm.sigs',
-            ],
-          },
-          'sources': [
-            'common/gpu/media/vaapi_drm_picture.cc',
-            'common/gpu/media/vaapi_drm_picture.h',
-          ],
-        }],
-      ],
-      'variables': {
-        'generate_stubs_script': '../tools/generate_stubs/generate_stubs.py',
-        'extra_header': 'common/gpu/media/va_stub_header.fragment',
-        'outfile_type': 'posix_stubs',
-        'stubs_filename_root': 'va_stubs',
-        'project_path': 'content/common/gpu/media',
-        'intermediate_dir': '<(INTERMEDIATE_DIR)',
-        'output_root': '<(SHARED_INTERMEDIATE_DIR)/va',
-      },
-      'include_dirs': [
-        '<(DEPTH)/third_party/libva',
-        '<(DEPTH)/third_party/libyuv',
-        '<(output_root)',
-      ],
-      'actions': [
-        {
-          'action_name': 'generate_stubs',
-          'inputs': [
-            '<(generate_stubs_script)',
-            '<(extra_header)',
-            '<@(sig_files)',
-          ],
-          'outputs': [
-            '<(intermediate_dir)/<(stubs_filename_root).cc',
-            '<(output_root)/<(project_path)/<(stubs_filename_root).h',
-          ],
-          'action': ['python',
-                     '<(generate_stubs_script)',
-                     '-i', '<(intermediate_dir)',
-                     '-o', '<(output_root)/<(project_path)',
-                     '-t', '<(outfile_type)',
-                     '-e', '<(extra_header)',
-                     '-s', '<(stubs_filename_root)',
-                     '-p', '<(project_path)',
-                     '<@(_inputs)',
-          ],
-          'process_outputs_as_sources': 1,
-          'message': 'Generating libva stubs for dynamic loading',
-        },
-     ]
-    }],
-    ['OS=="win"', {
-      'dependencies': [
-        '../media/media.gyp:media',
-        '../ui/gl/gl.gyp:gl',
-      ],
-      'link_settings': {
-        'libraries': [
-           '-ld3d9.lib',
-           '-ld3d11.lib',
-           '-ldxva2.lib',
-           '-lstrmiids.lib',
-           '-lmf.lib',
-           '-lmfplat.lib',
-           '-lmfuuid.lib',
-        ],
-        'msvs_settings': {
-          'VCLinkerTool': {
-            'DelayLoadDLLs': [
-              'd3d9.dll',
-			  'd3d11.dll',
-              'dxva2.dll',
-              'mf.dll',
-              'mfplat.dll',
-            ],
-          },
-        },
-      },
-      'sources': [
-        'common/gpu/media/dxva_video_decode_accelerator_win.cc',
-        'common/gpu/media/dxva_video_decode_accelerator_win.h',
-      ],
-      'include_dirs': [
-        '<(DEPTH)/third_party/khronos',
-      ],
-    }],
     ['use_seccomp_bpf==0', {
       'sources!': [
         'common/sandbox_linux/android/sandbox_bpf_base_policy_android.cc',
diff --git a/content/content_gpu.gypi b/content/content_gpu.gypi
index 45bfcd3..a75555f 100644
--- a/content/content_gpu.gypi
+++ b/content/content_gpu.gypi
@@ -8,6 +8,8 @@
     '../gpu/gpu.gyp:command_buffer_traits',
     '../gpu/gpu.gyp:gpu',
     '../gpu/gpu.gyp:gpu_ipc_service',
+    '../media/gpu/ipc/media_ipc.gyp:media_gpu_ipc_service',
+    '../media/media.gyp:media_gpu',
     '../skia/skia.gyp:skia',
     '../ui/gl/gl.gyp:gl',
     'content_common_mojo_bindings.gyp:content_common_mojo_bindings',
diff --git a/content/content_renderer.gypi b/content/content_renderer.gypi
index 356f77f..84fb9bb 100644
--- a/content/content_renderer.gypi
+++ b/content/content_renderer.gypi
@@ -20,7 +20,10 @@
     '../ipc/ipc.gyp:ipc',
     '../jingle/jingle.gyp:jingle_glue',
     '../media/blink/media_blink.gyp:media_blink',
+    '../media/gpu/ipc/media_ipc.gyp:media_gpu_ipc_client',
+    '../media/gpu/ipc/media_ipc.gyp:media_gpu_ipc_common',
     '../media/media.gyp:media',
+    '../media/media.gyp:media_gpu',
     '../mojo/mojo_base.gyp:mojo_geometry_lib',
     '../mojo/mojo_base.gyp:mojo_url_type_converters',
     '../mojo/mojo_edk.gyp:mojo_js_lib',
diff --git a/content/content_tests.gypi b/content/content_tests.gypi
index 207f615..816ea8ba 100644
--- a/content/content_tests.gypi
+++ b/content/content_tests.gypi
@@ -1574,184 +1574,6 @@
         },
       ],
     }],
-    ['chromeos==1 or OS=="win" or OS=="android"', {
-      'targets': [
-          {
-            # TODO(GYP): Port Windows and ChromeOS logic.
-            # GN: //content/test:video_decode_accelerator_unittest
-            'target_name': 'video_decode_accelerator_unittest',
-            'type': '<(gtest_target_type)',
-            'dependencies': [
-              '../base/base.gyp:base',
-              '../gpu/gpu.gyp:command_buffer_service',
-              '../media/media.gyp:media',
-              '../testing/gtest.gyp:gtest',
-              '../ui/base/ui_base.gyp:ui_base',
-              '../ui/gfx/gfx.gyp:gfx',
-              '../ui/gfx/gfx.gyp:gfx_geometry',
-              '../ui/gfx/gfx.gyp:gfx_test_support',
-              '../ui/gl/gl.gyp:gl',
-              '../ui/gl/gl.gyp:gl_test_support',
-              'content.gyp:content',
-            ],
-            'include_dirs': [
-              '<(DEPTH)/third_party/khronos',
-            ],
-            'sources': [
-              'common/gpu/media/android_video_decode_accelerator_unittest.cc',
-              'common/gpu/media/rendering_helper.cc',
-              'common/gpu/media/rendering_helper.h',
-              'common/gpu/media/video_accelerator_unittest_helpers.h',
-              'common/gpu/media/video_decode_accelerator_unittest.cc',
-            ],
-            'conditions': [
-              ['OS=="android"', {
-                'sources/': [
-                  ['exclude', '^common/gpu/media/rendering_helper.h'],
-                  ['exclude', '^common/gpu/media/rendering_helper.cc'],
-                  ['exclude', '^common/gpu/media/video_decode_accelerator_unittest.cc'],
-                ],
-                'dependencies': [
-                  '../media/media.gyp:player_android',
-                  '../testing/gmock.gyp:gmock',
-                  '../testing/android/native_test.gyp:native_test_native_code',
-                  '../gpu/gpu.gyp:gpu_unittest_utils',
-                ],
-              }, {  # OS!="android"
-                'sources/': [
-                  ['exclude', '^common/gpu/media/android_video_decode_accelerator_unittest.cc'],
-                ],
-              }],
-              ['OS=="win"', {
-                'dependencies': [
-                  '<(angle_path)/src/angle.gyp:libEGL',
-                  '<(angle_path)/src/angle.gyp:libGLESv2',
-                ],
-              }],
-              ['target_arch != "arm" and (OS=="linux" or chromeos == 1)', {
-                'include_dirs': [
-                  '<(DEPTH)/third_party/libva',
-                ],
-              }],
-              ['use_x11==1', {
-                'dependencies': [
-                  '../build/linux/system.gyp:x11',  # Used by rendering_helper.cc
-                  '../ui/gfx/x/gfx_x11.gyp:gfx_x11',
-                ],
-              }],
-              ['use_ozone==1 and chromeos==1', {
-                'dependencies': [
-                  '../ui/display/display.gyp:display',  # Used by rendering_helper.cc
-                  '../ui/ozone/ozone.gyp:ozone',  # Used by rendering_helper.cc
-                ],
-              }],
-            ],
-            # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
-            'msvs_disabled_warnings': [ 4267, ],
-          },
-        ]
-    }],
-    ['chromeos==1 and target_arch != "arm"', {
-      'targets': [
-          {
-            'target_name': 'vaapi_jpeg_decoder_unittest',
-            'type': '<(gtest_target_type)',
-            'dependencies': [
-              'content.gyp:content_common',
-              '../base/base.gyp:base',
-              '../media/media.gyp:media',
-              '../media/media.gyp:media_test_support',
-              '../testing/gtest.gyp:gtest',
-            ],
-            'sources': [
-              'common/gpu/media/vaapi_jpeg_decoder_unittest.cc',
-            ],
-            'include_dirs': [
-              '<(DEPTH)/third_party/libva',
-            ],
-            'conditions': [
-              ['use_x11==1', {
-                'dependencies': [
-                  '../build/linux/system.gyp:x11',
-                ]
-              }, {
-                'dependencies': [
-                  '../build/linux/system.gyp:libdrm',
-                ]
-              }],
-            ],
-          }
-        ]
-    }],
-    ['chromeos==1 or OS=="mac"', {
-      'targets': [
-        {
-          'target_name': 'video_encode_accelerator_unittest',
-          'type': 'executable',
-          'dependencies': [
-            'content.gyp:content_common',
-            '../base/base.gyp:base',
-            '../media/media.gyp:media',
-            '../media/media.gyp:media_test_support',
-            '../testing/gtest.gyp:gtest',
-            '../ui/base/ui_base.gyp:ui_base',
-            '../ui/gfx/gfx.gyp:gfx',
-            '../ui/gfx/gfx.gyp:gfx_geometry',
-            '../ui/gfx/gfx.gyp:gfx_test_support',
-            '../ui/gl/gl.gyp:gl',
-            '../ui/gl/gl.gyp:gl_test_support',
-            'content.gyp:content',
-          ],
-          'sources': [
-            'common/gpu/media/video_accelerator_unittest_helpers.h',
-            'common/gpu/media/video_encode_accelerator_unittest.cc',
-          ],
-          'include_dirs': [
-            '<(DEPTH)/third_party/libva',
-            '<(DEPTH)/third_party/libyuv',
-          ],
-          'conditions': [
-            ['use_x11==1', {
-              'dependencies': [
-                '../ui/gfx/x/gfx_x11.gyp:gfx_x11',
-              ],
-            }],
-            ['use_ozone==1', {
-              'dependencies': [
-                '../ui/ozone/ozone.gyp:ozone',
-              ],
-            }],
-          ],
-        }
-      ]
-    }],
-    ['chromeos==1', {
-      'targets': [
-        {
-          'target_name': 'jpeg_decode_accelerator_unittest',
-          'type': 'executable',
-          'dependencies': [
-            '../base/base.gyp:base',
-            '../media/media.gyp:media',
-            '../media/media.gyp:media_test_support',
-            '../testing/gtest.gyp:gtest',
-            '../third_party/libyuv/libyuv.gyp:libyuv',
-            '../ui/gfx/gfx.gyp:gfx',
-            '../ui/gfx/gfx.gyp:gfx_geometry',
-            '../ui/gl/gl.gyp:gl',
-            '../ui/gl/gl.gyp:gl_test_support',
-            'content.gyp:content',
-          ],
-          'sources': [
-            'common/gpu/media/jpeg_decode_accelerator_unittest.cc',
-          ],
-          'include_dirs': [
-            '<(DEPTH)/third_party/libva',
-            '<(DEPTH)/third_party/libyuv',
-          ],
-        }
-      ]
-    }],
     ['OS == "android"', {
       'targets': [
         {
@@ -1962,18 +1784,6 @@
           'includes': [ '../build/jni_generator.gypi' ],
         },
         {
-          # GN: //content/test:video_decode_accelerator_unittest_apk
-          'target_name': 'video_decode_accelerator_unittest_apk',
-          'type': 'none',
-          'dependencies': [
-            'video_decode_accelerator_unittest',
-          ],
-          'variables': {
-            'test_suite_name': 'video_decode_accelerator_unittest',
-          },
-          'includes': [ '../build/apk_test.gypi' ],
-        },
-        {
           # GN: //content/public/test/android:test_support_content_jni_headers
           'target_name': 'test_support_content_jni_headers',
           'type': 'none',
diff --git a/content/gpu/BUILD.gn b/content/gpu/BUILD.gn
index f91778dc..63d8adf1 100644
--- a/content/gpu/BUILD.gn
+++ b/content/gpu/BUILD.gn
@@ -54,6 +54,8 @@
     "//gpu/ipc/common:command_buffer_traits",
     "//gpu/ipc/service",
     "//ipc",
+    "//media/gpu",
+    "//media/gpu/ipc/service",
     "//media/mojo/services:application_factory",
     "//services/shell/public/interfaces",
     "//skia",
diff --git a/content/gpu/gpu_child_thread.cc b/content/gpu/gpu_child_thread.cc
index 98f7bc1..0776d69 100644
--- a/content/gpu/gpu_child_thread.cc
+++ b/content/gpu/gpu_child_thread.cc
@@ -16,10 +16,6 @@
 #include "content/child/child_process.h"
 #include "content/child/thread_safe_sender.h"
 #include "content/common/establish_channel_params.h"
-#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
-#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
-#include "content/common/gpu/media/media_service.h"
 #include "content/common/gpu_host_messages.h"
 #include "content/gpu/gpu_process_control_impl.h"
 #include "content/gpu/gpu_watchdog_thread.h"
@@ -35,6 +31,10 @@
 #include "gpu/ipc/service/gpu_memory_buffer_factory.h"
 #include "ipc/ipc_channel_handle.h"
 #include "ipc/ipc_sync_message_filter.h"
+#include "media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h"
+#include "media/gpu/ipc/service/gpu_video_decode_accelerator.h"
+#include "media/gpu/ipc/service/gpu_video_encode_accelerator.h"
+#include "media/gpu/ipc/service/media_service.h"
 #include "ui/gl/gl_implementation.h"
 #include "ui/gl/gl_switches.h"
 #include "ui/gl/gpu_switching_manager.h"
@@ -46,7 +46,7 @@
 #endif
 
 #if defined(OS_ANDROID)
-#include "content/common/gpu/media/avda_surface_tracker.h"
+#include "media/gpu/avda_surface_tracker.h"
 #endif
 
 namespace content {
@@ -367,12 +367,12 @@
   gpu_preferences_ = gpu_preferences;
 
   gpu_info_.video_decode_accelerator_capabilities =
-      content::GpuVideoDecodeAccelerator::GetCapabilities(gpu_preferences_);
+      media::GpuVideoDecodeAccelerator::GetCapabilities(gpu_preferences_);
   gpu_info_.video_encode_accelerator_supported_profiles =
-      content::GpuVideoEncodeAccelerator::GetSupportedProfiles(
+      media::GpuVideoEncodeAccelerator::GetSupportedProfiles(
           gpu_preferences_);
   gpu_info_.jpeg_decode_accelerator_supported =
-      content::GpuJpegDecodeAccelerator::IsSupported();
+      media::GpuJpegDecodeAccelerator::IsSupported();
 
   // Record initialization only after collecting the GPU info because that can
   // take a significant amount of time.
@@ -415,7 +415,7 @@
                             ChildProcess::current()->GetShutDownEvent(),
                             sync_point_manager, gpu_memory_buffer_factory_));
 
-  media_service_.reset(new MediaService(gpu_channel_manager_.get()));
+  media_service_.reset(new media::MediaService(gpu_channel_manager_.get()));
 
 #if defined(USE_OZONE)
   ui::OzonePlatform::GetInstance()
@@ -573,7 +573,7 @@
 }
 
 void GpuChildThread::OnDestroyingVideoSurface(int surface_id) {
-  AVDASurfaceTracker::GetInstance()->NotifyDestroyingSurface(surface_id);
+  media::AVDASurfaceTracker::GetInstance()->NotifyDestroyingSurface(surface_id);
   Send(new GpuHostMsg_DestroyingVideoSurfaceAck(surface_id));
 }
 #endif
diff --git a/content/gpu/gpu_child_thread.h b/content/gpu/gpu_child_thread.h
index 2617d647..5b3b44e2 100644
--- a/content/gpu/gpu_child_thread.h
+++ b/content/gpu/gpu_child_thread.h
@@ -34,6 +34,10 @@
 class SyncPointManager;
 }
 
+namespace media {
+class MediaService;
+}
+
 namespace sandbox {
 class TargetServices;
 }
@@ -41,7 +45,6 @@
 namespace content {
 class GpuProcessControlImpl;
 class GpuWatchdogThread;
-class MediaService;
 struct EstablishChannelParams;
 #if defined(OS_MACOSX)
 struct BufferPresentedParams;
@@ -163,7 +166,7 @@
 
   std::unique_ptr<gpu::GpuChannelManager> gpu_channel_manager_;
 
-  std::unique_ptr<MediaService> media_service_;
+  std::unique_ptr<media::MediaService> media_service_;
 
   // Information about the GPU, such as device and vendor ID.
   gpu::GPUInfo gpu_info_;
diff --git a/content/gpu/gpu_main.cc b/content/gpu/gpu_main.cc
index 486281c..7cfe918 100644
--- a/content/gpu/gpu_main.cc
+++ b/content/gpu/gpu_main.cc
@@ -56,7 +56,7 @@
 #if defined(OS_WIN)
 #include "base/win/windows_version.h"
 #include "base/win/scoped_com_initializer.h"
-#include "content/common/gpu/media/dxva_video_decode_accelerator_win.h"
+#include "media/gpu/dxva_video_decode_accelerator_win.h"
 #include "sandbox/win/src/sandbox.h"
 #endif
 
@@ -75,7 +75,7 @@
 #endif
 
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
-#include "content/common/gpu/media/vaapi_wrapper.h"
+#include "media/gpu/vaapi_wrapper.h"
 #endif
 
 #if defined(SANITIZER_COVERAGE)
@@ -246,7 +246,7 @@
   gpu_info.in_process_gpu = false;
 
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
-  VaapiWrapper::PreSandboxInitialization();
+  media::VaapiWrapper::PreSandboxInitialization();
 #endif
 
 #if defined(OS_ANDROID) || defined(OS_CHROMEOS)
@@ -479,7 +479,7 @@
   }
 
 #if defined(OS_WIN)
-  content::DXVAVideoDecodeAccelerator::PreSandboxInitialization();
+  media::DXVAVideoDecodeAccelerator::PreSandboxInitialization();
 #endif
   return true;
 }
diff --git a/content/public/gpu/BUILD.gn b/content/public/gpu/BUILD.gn
index 867134ac..de4f504 100644
--- a/content/public/gpu/BUILD.gn
+++ b/content/public/gpu/BUILD.gn
@@ -36,7 +36,8 @@
     "//content/public/common:common_sources",
     "//gpu/command_buffer/service",
     "//gpu/config",
-    "//media:media",
+    "//media",
+    "//media/gpu",
   ]
 
   allow_circular_includes_from = [ "//content/gpu:gpu_sources" ]
diff --git a/content/public/gpu/gpu_video_decode_accelerator_factory.cc b/content/public/gpu/gpu_video_decode_accelerator_factory.cc
index ed8172f4..f6b468b 100644
--- a/content/public/gpu/gpu_video_decode_accelerator_factory.cc
+++ b/content/public/gpu/gpu_video_decode_accelerator_factory.cc
@@ -5,8 +5,8 @@
 #include "content/public/gpu/gpu_video_decode_accelerator_factory.h"
 
 #include "base/memory/ptr_util.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
 #include "content/gpu/gpu_child_thread.h"
+#include "media/gpu/gpu_video_decode_accelerator_factory_impl.h"
 
 namespace content {
 
@@ -18,7 +18,7 @@
     const GetGLContextCallback& get_gl_context_cb,
     const MakeGLContextCurrentCallback& make_context_current_cb,
     const BindGLImageCallback& bind_image_cb) {
-  auto gvdafactory_impl = GpuVideoDecodeAcceleratorFactoryImpl::Create(
+  auto gvdafactory_impl = media::GpuVideoDecodeAcceleratorFactoryImpl::Create(
       get_gl_context_cb, make_context_current_cb, bind_image_cb);
   if (!gvdafactory_impl)
     return nullptr;
@@ -35,7 +35,7 @@
     const BindGLImageCallback& bind_image_cb,
     const GetGLES2DecoderCallback& get_gles2_decoder_cb) {
   auto gvdafactory_impl =
-      GpuVideoDecodeAcceleratorFactoryImpl::CreateWithGLES2Decoder(
+      media::GpuVideoDecodeAcceleratorFactoryImpl::CreateWithGLES2Decoder(
           get_gl_context_cb, make_context_current_cb, bind_image_cb,
           get_gles2_decoder_cb);
   if (!gvdafactory_impl)
@@ -49,7 +49,7 @@
 std::unique_ptr<GpuVideoDecodeAcceleratorFactory>
 GpuVideoDecodeAcceleratorFactory::CreateWithNoGL() {
   auto gvdafactory_impl =
-      GpuVideoDecodeAcceleratorFactoryImpl::CreateWithNoGL();
+      media::GpuVideoDecodeAcceleratorFactoryImpl::CreateWithNoGL();
   if (!gvdafactory_impl)
     return nullptr;
 
@@ -62,7 +62,7 @@
 GpuVideoDecodeAcceleratorFactory::GetDecoderCapabilities() {
   const gpu::GpuPreferences gpu_preferences =
       GpuChildThread::current()->gpu_preferences();
-  return GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
+  return media::GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
       gpu_preferences);
 }
 
@@ -79,7 +79,7 @@
 }
 
 GpuVideoDecodeAcceleratorFactory::GpuVideoDecodeAcceleratorFactory(
-    std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl> gvdafactory_impl)
-    : gvdafactory_impl_(std::move(gvdafactory_impl)) {}
+    std::unique_ptr<media::GpuVideoDecodeAcceleratorFactoryImpl>
+    gvdafactory_impl) : gvdafactory_impl_(std::move(gvdafactory_impl)) {}
 
 }  // namespace content
diff --git a/content/public/gpu/gpu_video_decode_accelerator_factory.h b/content/public/gpu/gpu_video_decode_accelerator_factory.h
index ea740e24..6a1beb6f 100644
--- a/content/public/gpu/gpu_video_decode_accelerator_factory.h
+++ b/content/public/gpu/gpu_video_decode_accelerator_factory.h
@@ -27,9 +27,11 @@
 }
 }
 
-namespace content {
-
+namespace media {
 class GpuVideoDecodeAcceleratorFactoryImpl;
+}
+
+namespace content {
 
 // This factory allows creation of VideoDecodeAccelerator implementations,
 // providing the most applicable VDA for current platform and given
@@ -90,12 +92,14 @@
 
  private:
   // TODO(posciak): This is temporary and will not be needed once
-  // GpuVideoDecodeAcceleratorFactoryImpl implements
+  // media::GpuVideoDecodeAcceleratorFactoryImpl implements
   // GpuVideoDecodeAcceleratorFactory, see crbug.com/597150 and related.
   GpuVideoDecodeAcceleratorFactory(
-      std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl> gvdafactory_impl);
+      std::unique_ptr<media::GpuVideoDecodeAcceleratorFactoryImpl>
+      gvdafactory_impl);
 
-  std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl> gvdafactory_impl_;
+  std::unique_ptr<media::GpuVideoDecodeAcceleratorFactoryImpl>
+      gvdafactory_impl_;
 };
 
 }  // namespace content
diff --git a/content/renderer/BUILD.gn b/content/renderer/BUILD.gn
index 1fbf991d..0fab411e 100644
--- a/content/renderer/BUILD.gn
+++ b/content/renderer/BUILD.gn
@@ -58,6 +58,7 @@
     "//jingle:jingle_glue",
     "//media",
     "//media/blink",
+    "//media/gpu",
     "//media/gpu/ipc/client",
     "//media/gpu/ipc/common",
     "//media/midi",
diff --git a/content/renderer/media/renderer_gpu_video_accelerator_factories.cc b/content/renderer/media/renderer_gpu_video_accelerator_factories.cc
index 00a8ba0..0020455 100644
--- a/content/renderer/media/renderer_gpu_video_accelerator_factories.cc
+++ b/content/renderer/media/renderer_gpu_video_accelerator_factories.cc
@@ -19,9 +19,9 @@
 #include "gpu/command_buffer/client/gles2_interface.h"
 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
 #include "gpu/ipc/client/gpu_channel_host.h"
+#include "media/gpu/gpu_video_accelerator_util.h"
 #include "media/gpu/ipc/client/gpu_video_decode_accelerator_host.h"
 #include "media/gpu/ipc/client/gpu_video_encode_accelerator_host.h"
-#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
 #include "media/video/video_decode_accelerator.h"
 #include "media/video/video_encode_accelerator.h"
 
diff --git a/content/renderer/pepper/pepper_video_encoder_host.cc b/content/renderer/pepper/pepper_video_encoder_host.cc
index ecc796330..e8e54bc2 100644
--- a/content/renderer/pepper/pepper_video_encoder_host.cc
+++ b/content/renderer/pepper/pepper_video_encoder_host.cc
@@ -19,8 +19,8 @@
 #include "gpu/ipc/client/command_buffer_proxy_impl.h"
 #include "media/base/bind_to_current_loop.h"
 #include "media/base/video_frame.h"
+#include "media/gpu/gpu_video_accelerator_util.h"
 #include "media/gpu/ipc/client/gpu_video_encode_accelerator_host.h"
-#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
 #include "media/renderers/gpu_video_accelerator_factories.h"
 #include "media/video/video_encode_accelerator.h"
 #include "ppapi/c/pp_codecs.h"
diff --git a/content/test/BUILD.gn b/content/test/BUILD.gn
index c3220ad..d589deac 100644
--- a/content/test/BUILD.gn
+++ b/content/test/BUILD.gn
@@ -894,90 +894,3 @@
     deps += [ "//testing/android/native_test:native_test_native_code" ]
   }
 }
-
-if (is_android || is_chromeos) {
-  # TODO(GYP): Port Windows logic.
-  test("video_decode_accelerator_unittest") {
-    deps = [
-      "//base",
-      "//content",
-      "//media",
-      "//testing/gtest",
-      "//ui/base",
-      "//ui/gfx",
-      "//ui/gfx:test_support",
-      "//ui/gfx/geometry",
-      "//ui/gl",
-      "//ui/gl:test_support",
-    ]
-    configs += [ "//third_party/khronos:khronos_headers" ]
-    if (is_chromeos && target_cpu != "arm") {
-      configs += [ "//third_party/libva:libva_config" ]
-    }
-    sources = [
-      "//content/common/gpu/media/video_accelerator_unittest_helpers.h",
-    ]
-    if (is_android) {
-      sources += [ "//content/common/gpu/media/android_video_decode_accelerator_unittest.cc" ]
-    } else {
-      sources += [
-        "//content/common/gpu/media/rendering_helper.cc",
-        "//content/common/gpu/media/rendering_helper.h",
-        "//content/common/gpu/media/video_decode_accelerator_unittest.cc",
-      ]
-    }
-
-    if (is_android) {
-      deps += [
-        "//gpu:test_support",
-        "//media/base/android",
-        "//media/base/android:media_java",
-        "//media/capture/video/android:capture_java",
-        "//testing/gmock",
-        "//ui/android:ui_java",
-      ]
-    }
-    if (is_chromeos && use_ozone) {
-      deps += [
-        "//ui/display",  # Used by rendering_helper.cc
-        "//ui/ozone",  # Used by rendering_helper.cc
-      ]
-    }
-    if (use_x11) {
-      configs += [ "//build/config/linux:x11" ]
-      deps += [ "//ui/gfx/x" ]
-    }
-  }
-}
-
-if (is_chromeos || is_mac) {
-  test("video_encode_accelerator_unittest") {
-    deps = [
-      "//base",
-      "//content",
-      "//media",
-      "//media/base:test_support",
-      "//testing/gtest",
-      "//ui/base",
-      "//ui/gfx",
-      "//ui/gfx:test_support",
-      "//ui/gfx/geometry",
-      "//ui/gl",
-      "//ui/gl:test_support",
-    ]
-    configs += [
-      "//third_party/libva:libva_config",
-      "//third_party/libyuv:libyuv_config",
-    ]
-    sources = [
-      "//content/common/gpu/media/video_accelerator_unittest_helpers.h",
-      "//content/common/gpu/media/video_encode_accelerator_unittest.cc",
-    ]
-    if (use_x11) {
-      deps += [ "//ui/gfx/x" ]
-    }
-    if (use_ozone) {
-      deps += [ "//ui/ozone" ]
-    }
-  }
-}
diff --git a/media/filters/vp9_parser.h b/media/filters/vp9_parser.h
index 2f3d9c9..4d43049 100644
--- a/media/filters/vp9_parser.h
+++ b/media/filters/vp9_parser.h
@@ -7,7 +7,7 @@
 // accelerators, e.g. libva which implements VA-API, require the caller
 // (chrome) to feed them parsed VP9 frame header.
 //
-// See content::VP9Decoder for example usage.
+// See media::VP9Decoder for example usage.
 //
 #ifndef MEDIA_FILTERS_VP9_PARSER_H_
 #define MEDIA_FILTERS_VP9_PARSER_H_
diff --git a/media/gpu/BUILD.gn b/media/gpu/BUILD.gn
new file mode 100644
index 0000000..e52aa3e4
--- /dev/null
+++ b/media/gpu/BUILD.gn
@@ -0,0 +1,436 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/features.gni")
+import("//build/config/ui.gni")
+import("//media/gpu/args.gni")
+import("//media/media_options.gni")
+import("//testing/test.gni")
+
+if (is_mac) {
+  import("//build/config/mac/mac_sdk.gni")
+}
+
+if (is_chromeos && current_cpu != "arm") {
+  action("libva_generate_stubs") {
+    extra_header = "va_stub_header.fragment"
+
+    script = "../../tools/generate_stubs/generate_stubs.py"
+    sources = [
+      "va.sigs",
+    ]
+    inputs = [
+      extra_header,
+    ]
+    if (use_x11) {
+      sources += [ "va_x11.sigs" ]
+    }
+    if (use_ozone) {
+      sources += [ "va_drm.sigs" ]
+    }
+    stubs_filename_root = "va_stubs"
+
+    outputs = [
+      "$target_gen_dir/$stubs_filename_root.cc",
+      "$target_gen_dir/$stubs_filename_root.h",
+    ]
+    args = [
+      "-i",
+      rebase_path("$target_gen_dir", root_build_dir),
+      "-o",
+      rebase_path("$target_gen_dir", root_build_dir),
+      "-t",
+      "posix_stubs",
+      "-e",
+      rebase_path(extra_header, root_build_dir),
+      "-s",
+      stubs_filename_root,
+      "-p",
+      "media/gpu",
+    ]
+
+    args += rebase_path(sources, root_build_dir)
+  }
+}
+
+if (is_chromeos && use_v4lplugin) {
+  action("libv4l2_generate_stubs") {
+    extra_header = "v4l2_stub_header.fragment"
+
+    script = "../../tools/generate_stubs/generate_stubs.py"
+    sources = [
+      "v4l2.sig",
+    ]
+    inputs = [
+      extra_header,
+    ]
+    stubs_filename_root = "v4l2_stubs"
+
+    outputs = [
+      "$target_gen_dir/$stubs_filename_root.cc",
+      "$target_gen_dir/$stubs_filename_root.h",
+    ]
+    args = [
+      "-i",
+      rebase_path("$target_gen_dir", root_build_dir),
+      "-o",
+      rebase_path("$target_gen_dir", root_build_dir),
+      "-t",
+      "posix_stubs",
+      "-e",
+      rebase_path(extra_header, root_build_dir),
+      "-s",
+      stubs_filename_root,
+      "-p",
+      "media/gpu",
+    ]
+
+    args += rebase_path(sources, root_build_dir)
+  }
+}
+
+if (is_mac) {
+  action("libvt_generate_stubs") {
+    extra_header = "vt_stubs_header.fragment"
+
+    script = "../../tools/generate_stubs/generate_stubs.py"
+    sources = [
+      "vt.sig",
+    ]
+    inputs = [
+      extra_header,
+    ]
+    stubs_filename_root = "vt_stubs"
+
+    outputs = [
+      "$target_gen_dir/$stubs_filename_root.cc",
+      "$target_gen_dir/$stubs_filename_root.h",
+    ]
+    args = [
+      "-i",
+      rebase_path("$target_gen_dir", root_build_dir),
+      "-o",
+      rebase_path("$target_gen_dir", root_build_dir),
+      "-t",
+      "posix_stubs",
+      "-e",
+      rebase_path(extra_header, root_build_dir),
+      "-s",
+      stubs_filename_root,
+      "-p",
+      "media/gpu",
+    ]
+
+    args += rebase_path(sources, root_build_dir)
+  }
+}
+
+component("gpu") {
+  output_name = "media_gpu"
+
+  # Only local test code, GPU-related IPC code in the media layer, and
+  # media-related content code should access //media/gpu.
+  visibility = [
+    "//content/gpu:*",
+    "//content/public/gpu:*",
+    "//content/renderer:*",
+    "//media/gpu/ipc/*",
+    ":*",
+  ]
+
+  defines = [ "MEDIA_GPU_IMPLEMENTATION" ]
+
+  sources = [
+    "fake_video_decode_accelerator.cc",
+    "fake_video_decode_accelerator.h",
+    "gpu_video_accelerator_util.cc",
+    "gpu_video_accelerator_util.h",
+    "gpu_video_decode_accelerator_factory_impl.cc",
+    "gpu_video_decode_accelerator_factory_impl.h",
+    "gpu_video_decode_accelerator_helpers.h",
+    "shared_memory_region.cc",
+    "shared_memory_region.h",
+  ]
+
+  public_deps = [
+    "//base",
+    "//gpu",
+    "//media",
+    "//ui/gfx/geometry",
+  ]
+  deps = [
+    "//ui/display/types",
+    "//ui/gl",
+    "//ui/platform_window",
+  ]
+  libs = []
+  ldflags = []
+
+  if (is_mac) {
+    sources += [
+                 "vt_mac.h",
+                 "vt_video_decode_accelerator_mac.cc",
+                 "vt_video_decode_accelerator_mac.h",
+                 "vt_video_encode_accelerator_mac.cc",
+                 "vt_video_encode_accelerator_mac.h",
+               ] + get_target_outputs(":libvt_generate_stubs")
+    deps += [ ":libvt_generate_stubs" ]
+    lib_dirs = [ "$mac_sdk_path/usr/lib" ]
+
+    # TODO(markdittmer): Determine which libs are needed here.
+    libs += [
+      # "AVFoundation.framework",
+      # "CoreMedia.framework",
+      # "CoreVideo.framework",
+      # "IOSurface.framework",
+      # "OpenGL.framework",
+      # "QuartzCore.framework",
+      # "sandbox",
+    ]
+  }
+
+  if (is_android) {
+    sources += [
+      "android_copying_backing_strategy.cc",
+      "android_copying_backing_strategy.h",
+      "android_deferred_rendering_backing_strategy.cc",
+      "android_deferred_rendering_backing_strategy.h",
+      "android_video_decode_accelerator.cc",
+      "android_video_decode_accelerator.h",
+      "avda_codec_image.cc",
+      "avda_codec_image.h",
+      "avda_return_on_failure.h",
+      "avda_shared_state.cc",
+      "avda_shared_state.h",
+      "avda_state_provider.h",
+      "avda_surface_tracker.cc",
+      "avda_surface_tracker.h",
+    ]
+
+    if (enable_webrtc) {
+      deps += [ "//third_party/libyuv" ]
+      sources += [
+        "android_video_encode_accelerator.cc",
+        "android_video_encode_accelerator.h",
+      ]
+    }
+
+    if (mojo_media_host == "gpu") {
+      deps += [ "//media/mojo/services:cdm_service" ]
+    }
+  }
+
+  if (is_chromeos) {
+    sources += [
+      "accelerated_video_decoder.h",
+      "h264_decoder.cc",
+      "h264_decoder.h",
+      "h264_dpb.cc",
+      "h264_dpb.h",
+      "vp8_decoder.cc",
+      "vp8_decoder.h",
+      "vp8_picture.cc",
+      "vp8_picture.h",
+      "vp9_decoder.cc",
+      "vp9_decoder.h",
+      "vp9_picture.cc",
+      "vp9_picture.h",
+    ]
+    if (use_v4lplugin) {
+      defines += [ "USE_LIBV4L2" ]
+      sources += get_target_outputs(":libv4l2_generate_stubs")
+      deps += [ ":libv4l2_generate_stubs" ]
+    }
+    if (use_v4l2_codec) {
+      defines += [ "USE_V4L2_CODEC" ]
+      deps += [ "//third_party/libyuv" ]
+      sources += [
+        "generic_v4l2_device.cc",
+        "generic_v4l2_device.h",
+        "v4l2_device.cc",
+        "v4l2_device.h",
+        "v4l2_image_processor.cc",
+        "v4l2_image_processor.h",
+        "v4l2_jpeg_decode_accelerator.cc",
+        "v4l2_jpeg_decode_accelerator.h",
+        "v4l2_slice_video_decode_accelerator.cc",
+        "v4l2_slice_video_decode_accelerator.h",
+        "v4l2_video_decode_accelerator.cc",
+        "v4l2_video_decode_accelerator.h",
+        "v4l2_video_encode_accelerator.cc",
+        "v4l2_video_encode_accelerator.h",
+      ]
+      libs += [
+        "EGL",
+        "GLESv2",
+      ]
+    }
+    if (current_cpu == "arm") {
+      sources += [
+        "tegra_v4l2_device.cc",
+        "tegra_v4l2_device.h",
+      ]
+    }
+    if (current_cpu != "arm") {
+      sources += [
+                   "va_surface.h",
+                   "vaapi_jpeg_decode_accelerator.cc",
+                   "vaapi_jpeg_decode_accelerator.h",
+                   "vaapi_jpeg_decoder.cc",
+                   "vaapi_jpeg_decoder.h",
+                   "vaapi_picture.cc",
+                   "vaapi_picture.h",
+                   "vaapi_video_decode_accelerator.cc",
+                   "vaapi_video_decode_accelerator.h",
+                   "vaapi_video_encode_accelerator.cc",
+                   "vaapi_video_encode_accelerator.h",
+                   "vaapi_wrapper.cc",
+                   "vaapi_wrapper.h",
+                 ] + get_target_outputs(":libva_generate_stubs")
+      configs += [
+        "//third_party/libva:libva_config",
+        "//third_party/libyuv:libyuv_config",
+      ]
+      deps += [
+        ":libva_generate_stubs",
+        "//media",
+        "//third_party/libyuv",
+      ]
+      if (use_x11) {
+        sources += [
+          "vaapi_tfp_picture.cc",
+          "vaapi_tfp_picture.h",
+        ]
+      }
+      if (use_ozone) {
+        sources += [
+          "vaapi_drm_picture.cc",
+          "vaapi_drm_picture.h",
+        ]
+      }
+    }
+  }
+
+  if (is_win) {
+    sources += [
+      "dxva_video_decode_accelerator_win.cc",
+      "dxva_video_decode_accelerator_win.h",
+    ]
+    configs += [
+      "//build/config/compiler:no_size_t_to_int_warning",
+      "//third_party/khronos:khronos_headers",
+    ]
+    deps += [ "//ui/gl" ]
+    libs += [
+      "d3d9.lib",
+      "d3d11.lib",
+      "dxva2.lib",
+      "strmiids.lib",
+      "mf.lib",
+      "mfplat.lib",
+      "mfuuid.lib",
+    ]
+    ldflags += [
+      "/DELAYLOAD:d3d9.dll",
+      "/DELAYLOAD:d3d11.dll",
+      "/DELAYLOAD:dxva2.dll",
+      "/DELAYLOAD:mf.dll",
+      "/DELAYLOAD:mfplat.dll",
+    ]
+
+    # TODO(GYP): extract_xinput action.
+  }
+
+  if (use_x11) {
+    deps += [ "//ui/gfx/x" ]
+  }
+}
+
+if (is_android || is_chromeos) {
+  # TODO(GYP): Port Windows logic.
+  test("video_decode_accelerator_unittest") {
+    deps = [
+      # (":gpu" omitted: it is the same target as "//media/gpu" below.)
+      "//base",
+      "//media",
+      "//media/gpu",
+      "//testing/gtest",
+      "//ui/base",
+      "//ui/gfx",
+      "//ui/gfx:test_support",
+      "//ui/gfx/geometry",
+      "//ui/gl",
+      "//ui/gl:test_support",
+    ]
+    configs += [ "//third_party/khronos:khronos_headers" ]
+    if (is_chromeos && target_cpu != "arm") {
+      configs += [ "//third_party/libva:libva_config" ]
+    }
+    sources = [
+      "video_accelerator_unittest_helpers.h",
+    ]
+    if (is_android) {
+      sources += [ "android_video_decode_accelerator_unittest.cc" ]
+    } else {
+      sources += [
+        "rendering_helper.cc",
+        "rendering_helper.h",
+        "video_decode_accelerator_unittest.cc",
+      ]
+    }
+
+    if (is_android) {
+      deps += [
+        "//gpu:test_support",
+        "//media/base/android",
+        "//media/base/android:media_java",
+        "//media/capture/video/android:capture_java",
+        "//testing/gmock",
+        "//ui/android:ui_java",
+      ]
+    }
+    if (is_chromeos && use_ozone) {
+      deps += [
+        "//ui/display",  # Used by rendering_helper.cc
+        "//ui/ozone",  # Used by rendering_helper.cc
+      ]
+    }
+    if (use_x11) {
+      configs += [ "//build/config/linux:x11" ]
+      deps += [ "//ui/gfx/x" ]
+    }
+  }
+}
+
+if (is_chromeos || is_mac) {
+  test("video_encode_accelerator_unittest") {
+    deps = [
+      "//base",
+      "//media",
+      "//media/base:test_support",
+      "//media/gpu",
+      "//testing/gtest",
+      "//ui/base",
+      "//ui/gfx",
+      "//ui/gfx:test_support",
+      "//ui/gfx/geometry",
+      "//ui/gl",
+      "//ui/gl:test_support",
+    ]
+    configs += [
+      "//third_party/libva:libva_config",
+      "//third_party/libyuv:libyuv_config",
+    ]
+    sources = [
+      "video_accelerator_unittest_helpers.h",
+      "video_encode_accelerator_unittest.cc",
+    ]
+    if (use_x11) {
+      deps += [ "//ui/gfx/x" ]
+    }
+    if (use_ozone) {
+      deps += [ "//ui/ozone" ]
+    }
+  }
+}
diff --git a/media/gpu/DEPS b/media/gpu/DEPS
new file mode 100644
index 0000000..182085c3
--- /dev/null
+++ b/media/gpu/DEPS
@@ -0,0 +1,13 @@
+# Do NOT add net/ or ui/base without a great reason, they're huge!
+include_rules = [
+  "+third_party/angle",
+  "+third_party/libva",
+  "+third_party/libyuv",
+  "+third_party/v4l-utils",
+  "+ui/display/chromeos",
+  "+ui/display/types",
+  "+ui/platform_window",
+
+  # media/gpu is not part of "media" target and should not use MEDIA_EXPORT.
+  "-media/base/media_export.h"
+]
diff --git a/content/common/gpu/media/accelerated_video_decoder.h b/media/gpu/accelerated_video_decoder.h
similarity index 87%
rename from content/common/gpu/media/accelerated_video_decoder.h
rename to media/gpu/accelerated_video_decoder.h
index 462e631a..c9b6c29 100644
--- a/content/common/gpu/media/accelerated_video_decoder.h
+++ b/media/gpu/accelerated_video_decoder.h
@@ -2,23 +2,23 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_ACCELERATED_VIDEO_DECODER_H_
-#define CONTENT_COMMON_GPU_MEDIA_ACCELERATED_VIDEO_DECODER_H_
+#ifndef MEDIA_GPU_ACCELERATED_VIDEO_DECODER_H_
+#define MEDIA_GPU_ACCELERATED_VIDEO_DECODER_H_
 
 #include <stddef.h>
 #include <stdint.h>
 
 #include "base/macros.h"
-#include "content/common/content_export.h"
+#include "media/gpu/media_gpu_export.h"
 #include "ui/gfx/geometry/size.h"
 
-namespace content {
+namespace media {
 
 // An AcceleratedVideoDecoder is a video decoder that requires support from an
 // external accelerator (typically a hardware accelerator) to partially offload
 // the decode process after parsing stream headers, and performing reference
 // frame and state management.
-class CONTENT_EXPORT AcceleratedVideoDecoder {
+class MEDIA_GPU_EXPORT AcceleratedVideoDecoder {
  public:
   AcceleratedVideoDecoder() {}
   virtual ~AcceleratedVideoDecoder() {}
@@ -61,6 +61,6 @@
   DISALLOW_COPY_AND_ASSIGN(AcceleratedVideoDecoder);
 };
 
-}  //  namespace content
+}  //  namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_ACCELERATED_VIDEO_DECODER_H_
+#endif  // MEDIA_GPU_ACCELERATED_VIDEO_DECODER_H_
diff --git a/content/common/gpu/media/android_copying_backing_strategy.cc b/media/gpu/android_copying_backing_strategy.cc
similarity index 95%
rename from content/common/gpu/media/android_copying_backing_strategy.cc
rename to media/gpu/android_copying_backing_strategy.cc
index b091334..6aa8b67 100644
--- a/content/common/gpu/media/android_copying_backing_strategy.cc
+++ b/media/gpu/android_copying_backing_strategy.cc
@@ -2,21 +2,21 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/android_copying_backing_strategy.h"
+#include "media/gpu/android_copying_backing_strategy.h"
 
 #include "base/bind.h"
 #include "base/logging.h"
 #include "base/trace_event/trace_event.h"
-#include "content/common/gpu/media/avda_return_on_failure.h"
 #include "gpu/command_buffer/service/context_group.h"
 #include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
 #include "gpu/command_buffer/service/gles2_cmd_decoder.h"
 #include "media/base/limits.h"
+#include "media/gpu/avda_return_on_failure.h"
 #include "media/video/picture.h"
 #include "ui/gl/android/surface_texture.h"
 #include "ui/gl/gl_bindings.h"
 
-namespace content {
+namespace media {
 
 AndroidCopyingBackingStrategy::AndroidCopyingBackingStrategy(
     AVDAStateProvider* state_provider)
@@ -125,8 +125,10 @@
   if (!copier_) {
     copier_.reset(new gpu::CopyTextureCHROMIUMResourceManager());
     copier_->Initialize(state_provider_->GetGlDecoder().get(),
-                        state_provider_->GetGlDecoder()->GetContextGroup()->
-                            feature_info()->feature_flags());
+                        state_provider_->GetGlDecoder()
+                            ->GetContextGroup()
+                            ->feature_info()
+                            ->feature_flags());
   }
 
   // Here, we copy |surface_texture_id_| to the picture buffer instead of
@@ -188,4 +190,4 @@
                                 GL_RGBA, GL_UNSIGNED_BYTE, gfx::Rect(new_size));
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/android_copying_backing_strategy.h b/media/gpu/android_copying_backing_strategy.h
similarity index 81%
rename from content/common/gpu/media/android_copying_backing_strategy.h
rename to media/gpu/android_copying_backing_strategy.h
index 8c004d5..d1a3f09 100644
--- a/content/common/gpu/media/android_copying_backing_strategy.h
+++ b/media/gpu/android_copying_backing_strategy.h
@@ -2,16 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_ANDROID_COPYING_BACKING_STRATEGY_H_
-#define CONTENT_COMMON_GPU_MEDIA_ANDROID_COPYING_BACKING_STRATEGY_H_
+#ifndef MEDIA_GPU_ANDROID_COPYING_BACKING_STRATEGY_H_
+#define MEDIA_GPU_ANDROID_COPYING_BACKING_STRATEGY_H_
 
 #include <stdint.h>
 
 #include <memory>
 
 #include "base/compiler_specific.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/android_video_decode_accelerator.h"
+#include "media/gpu/android_video_decode_accelerator.h"
+#include "media/gpu/media_gpu_export.h"
 
 namespace gpu {
 class CopyTextureCHROMIUMResourceManager;
@@ -21,13 +21,13 @@
 class PictureBuffer;
 }
 
-namespace content {
+namespace media {
 
 class AVDAStateProvider;
 
 // A BackingStrategy implementation that copies images to PictureBuffer
 // textures via gpu texture copy.
-class CONTENT_EXPORT AndroidCopyingBackingStrategy
+class MEDIA_GPU_EXPORT AndroidCopyingBackingStrategy
     : public AndroidVideoDecodeAccelerator::BackingStrategy {
  public:
   explicit AndroidCopyingBackingStrategy(AVDAStateProvider* state_provider);
@@ -65,6 +65,6 @@
   media::VideoCodecBridge* media_codec_;
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_ANDROID_COPYING_BACKING_STRATEGY_H_
+#endif  // MEDIA_GPU_ANDROID_COPYING_BACKING_STRATEGY_H_
diff --git a/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc b/media/gpu/android_deferred_rendering_backing_strategy.cc
similarity index 96%
rename from content/common/gpu/media/android_deferred_rendering_backing_strategy.cc
rename to media/gpu/android_deferred_rendering_backing_strategy.cc
index 297513b4..1f6cfd74a 100644
--- a/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc
+++ b/media/gpu/android_deferred_rendering_backing_strategy.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/android_deferred_rendering_backing_strategy.h"
+#include "media/gpu/android_deferred_rendering_backing_strategy.h"
 
 #include <EGL/egl.h>
 #include <EGL/eglext.h>
@@ -12,15 +12,15 @@
 #include "base/logging.h"
 #include "base/message_loop/message_loop.h"
 #include "base/metrics/histogram.h"
-#include "content/common/gpu/media/avda_codec_image.h"
-#include "content/common/gpu/media/avda_return_on_failure.h"
-#include "content/common/gpu/media/avda_shared_state.h"
 #include "gpu/command_buffer/service/context_group.h"
 #include "gpu/command_buffer/service/gl_stream_texture_image.h"
 #include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
 #include "gpu/command_buffer/service/texture_manager.h"
 #include "gpu/ipc/common/gpu_surface_lookup.h"
 #include "gpu/ipc/service/gpu_channel.h"
+#include "media/gpu/avda_codec_image.h"
+#include "media/gpu/avda_return_on_failure.h"
+#include "media/gpu/avda_shared_state.h"
 #include "ui/gl/android/surface_texture.h"
 #include "ui/gl/egl_util.h"
 #include "ui/gl/gl_bindings.h"
@@ -28,7 +28,7 @@
 #include "ui/gl/scoped_binders.h"
 #include "ui/gl/scoped_make_current.h"
 
-namespace content {
+namespace media {
 
 AndroidDeferredRenderingBackingStrategy::
     AndroidDeferredRenderingBackingStrategy(AVDAStateProvider* state_provider)
@@ -384,8 +384,7 @@
   EGLBoolean result =
       eglDestroyImageKHR(gfx::GLSurfaceEGL::GetHardwareDisplay(), egl_image);
   if (result == EGL_FALSE) {
-    DLOG(ERROR) << "Error destroying EGLImage: "
-                << ui::GetLastEGLErrorString();
+    DLOG(ERROR) << "Error destroying EGLImage: " << ui::GetLastEGLErrorString();
   }
 }
 
@@ -423,14 +422,14 @@
   // Mali + <= KitKat crashes when we try to do this.  We don't know if it's
   // due to detaching a surface texture, but it's the same set of devices.
   if (!DoesSurfaceTextureDetachWork())
-      return false;
+    return false;
 
   // Other devices are unreliable for other reasons (e.g., EGLImage).
   if (gpu::gles2::GLES2Decoder* gl_decoder =
           state_provider_->GetGlDecoder().get()) {
     if (gpu::gles2::ContextGroup* group = gl_decoder->GetContextGroup()) {
       if (gpu::gles2::FeatureInfo* feature_info = group->feature_info()) {
-          return !feature_info->workarounds().avda_dont_copy_pictures;
+        return !feature_info->workarounds().avda_dont_copy_pictures;
       }
     }
   }
@@ -439,4 +438,4 @@
   return true;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/android_deferred_rendering_backing_strategy.h b/media/gpu/android_deferred_rendering_backing_strategy.h
similarity index 88%
rename from content/common/gpu/media/android_deferred_rendering_backing_strategy.h
rename to media/gpu/android_deferred_rendering_backing_strategy.h
index def6edb..2089aa9 100644
--- a/content/common/gpu/media/android_deferred_rendering_backing_strategy.h
+++ b/media/gpu/android_deferred_rendering_backing_strategy.h
@@ -2,15 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_ANDROID_DEFERRED_RENDERING_BACKING_STRATEGY_H_
-#define CONTENT_COMMON_GPU_MEDIA_ANDROID_DEFERRED_RENDERING_BACKING_STRATEGY_H_
+#ifndef MEDIA_GPU_ANDROID_DEFERRED_RENDERING_BACKING_STRATEGY_H_
+#define MEDIA_GPU_ANDROID_DEFERRED_RENDERING_BACKING_STRATEGY_H_
 
 #include <stdint.h>
 #include <vector>
 
 #include "base/macros.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/android_video_decode_accelerator.h"
+#include "media/gpu/android_video_decode_accelerator.h"
+#include "media/gpu/media_gpu_export.h"
 
 namespace gl {
 class GLImage;
@@ -23,7 +23,7 @@
 }
 }
 
-namespace content {
+namespace media {
 
 class AVDACodecImage;
 class AVDASharedState;
@@ -32,7 +32,7 @@
 // a PictureBuffer's texture is used to draw, then draws using the surface
 // texture's front buffer rather than a copy.  To do this, it uses a GLImage
 // implementation to talk to MediaCodec.
-class CONTENT_EXPORT AndroidDeferredRenderingBackingStrategy
+class MEDIA_GPU_EXPORT AndroidDeferredRenderingBackingStrategy
     : public AndroidVideoDecodeAccelerator::BackingStrategy {
  public:
   explicit AndroidDeferredRenderingBackingStrategy(
@@ -106,6 +106,6 @@
   DISALLOW_COPY_AND_ASSIGN(AndroidDeferredRenderingBackingStrategy);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_ANDROID_DEFERRED_RENDERING_BACKING_STRATEGY_H_
+#endif  // MEDIA_GPU_ANDROID_DEFERRED_RENDERING_BACKING_STRATEGY_H_
diff --git a/content/common/gpu/media/android_video_decode_accelerator.cc b/media/gpu/android_video_decode_accelerator.cc
similarity index 98%
rename from content/common/gpu/media/android_video_decode_accelerator.cc
rename to media/gpu/android_video_decode_accelerator.cc
index 36c11c5..f6b7178 100644
--- a/content/common/gpu/media/android_video_decode_accelerator.cc
+++ b/media/gpu/android_video_decode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/android_video_decode_accelerator.h"
+#include "media/gpu/android_video_decode_accelerator.h"
 
 #include <stddef.h>
 
@@ -21,10 +21,6 @@
 #include "base/task_runner_util.h"
 #include "base/threading/thread_checker.h"
 #include "base/trace_event/trace_event.h"
-#include "content/common/gpu/media/android_copying_backing_strategy.h"
-#include "content/common/gpu/media/android_deferred_rendering_backing_strategy.h"
-#include "content/common/gpu/media/avda_return_on_failure.h"
-#include "content/common/gpu/media/shared_memory_region.h"
 #include "gpu/command_buffer/service/gles2_cmd_decoder.h"
 #include "gpu/command_buffer/service/mailbox_manager.h"
 #include "gpu/ipc/service/gpu_channel.h"
@@ -36,6 +32,10 @@
 #include "media/base/media.h"
 #include "media/base/timestamp_constants.h"
 #include "media/base/video_decoder_config.h"
+#include "media/gpu/android_copying_backing_strategy.h"
+#include "media/gpu/android_deferred_rendering_backing_strategy.h"
+#include "media/gpu/avda_return_on_failure.h"
+#include "media/gpu/shared_memory_region.h"
 #include "media/video/picture.h"
 #include "ui/gl/android/scoped_java_surface.h"
 #include "ui/gl/android/surface_texture.h"
@@ -51,7 +51,7 @@
     PostError(FROM_HERE, media::VideoDecodeAccelerator::error_code); \
   } while (0)
 
-namespace content {
+namespace media {
 
 enum { kNumPictureBuffers = media::limits::kMaxVideoFrames + 1 };
 
@@ -64,18 +64,17 @@
 // MediaCodec fail when decoding if it's not actually supported. It's assumed
 // that consumers won't have software fallback for H264 on Android anyway.
 static const media::VideoCodecProfile kSupportedH264Profiles[] = {
-  media::H264PROFILE_BASELINE,
-  media::H264PROFILE_MAIN,
-  media::H264PROFILE_EXTENDED,
-  media::H264PROFILE_HIGH,
-  media::H264PROFILE_HIGH10PROFILE,
-  media::H264PROFILE_HIGH422PROFILE,
-  media::H264PROFILE_HIGH444PREDICTIVEPROFILE,
-  media::H264PROFILE_SCALABLEBASELINE,
-  media::H264PROFILE_SCALABLEHIGH,
-  media::H264PROFILE_STEREOHIGH,
-  media::H264PROFILE_MULTIVIEWHIGH
-};
+    media::H264PROFILE_BASELINE,
+    media::H264PROFILE_MAIN,
+    media::H264PROFILE_EXTENDED,
+    media::H264PROFILE_HIGH,
+    media::H264PROFILE_HIGH10PROFILE,
+    media::H264PROFILE_HIGH422PROFILE,
+    media::H264PROFILE_HIGH444PREDICTIVEPROFILE,
+    media::H264PROFILE_SCALABLEBASELINE,
+    media::H264PROFILE_SCALABLEHIGH,
+    media::H264PROFILE_STEREOHIGH,
+    media::H264PROFILE_MULTIVIEWHIGH};
 
 // Because MediaCodec is thread-hostile (must be poked on a single thread) and
 // has no callback mechanism (b/11990118), we must drive it by polling for
@@ -1465,4 +1464,4 @@
   return capabilities;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/android_video_decode_accelerator.h b/media/gpu/android_video_decode_accelerator.h
similarity index 96%
rename from content/common/gpu/media/android_video_decode_accelerator.h
rename to media/gpu/android_video_decode_accelerator.h
index dec3efcb..5d94987 100644
--- a/content/common/gpu/media/android_video_decode_accelerator.h
+++ b/media/gpu/android_video_decode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_ANDROID_VIDEO_DECODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_ANDROID_VIDEO_DECODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_ANDROID_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_ANDROID_VIDEO_DECODE_ACCELERATOR_H_
 
 #include <stdint.h>
 
@@ -17,14 +17,14 @@
 #include "base/compiler_specific.h"
 #include "base/threading/thread_checker.h"
 #include "base/timer/timer.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/avda_state_provider.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
 #include "gpu/command_buffer/service/gles2_cmd_decoder.h"
 #include "gpu/command_buffer/service/gpu_preferences.h"
 #include "media/base/android/media_drm_bridge_cdm_context.h"
 #include "media/base/android/sdk_media_codec_bridge.h"
 #include "media/base/media_keys.h"
+#include "media/gpu/avda_state_provider.h"
+#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
+#include "media/gpu/media_gpu_export.h"
 #include "media/video/video_decode_accelerator.h"
 #include "ui/gl/android/scoped_java_surface.h"
 
@@ -32,14 +32,14 @@
 class SurfaceTexture;
 }
 
-namespace content {
+namespace media {
 
 // A VideoDecodeAccelerator implementation for Android.
 // This class decodes the input encoded stream by using Android's MediaCodec
 // class. http://developer.android.com/reference/android/media/MediaCodec.html
 // It delegates attaching pictures to PictureBuffers to a BackingStrategy, but
 // otherwise handles the work of transferring data to / from MediaCodec.
-class CONTENT_EXPORT AndroidVideoDecodeAccelerator
+class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
     : public media::VideoDecodeAccelerator,
       public AVDAStateProvider {
  public:
@@ -422,6 +422,6 @@
   friend class AndroidVideoDecodeAcceleratorTest;
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_ANDROID_VIDEO_DECODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_ANDROID_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/android_video_decode_accelerator_unittest.cc b/media/gpu/android_video_decode_accelerator_unittest.cc
similarity index 92%
rename from content/common/gpu/media/android_video_decode_accelerator_unittest.cc
rename to media/gpu/android_video_decode_accelerator_unittest.cc
index 2a10eea..69450136 100644
--- a/content/common/gpu/media/android_video_decode_accelerator_unittest.cc
+++ b/media/gpu/android_video_decode_accelerator_unittest.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/android_video_decode_accelerator.h"
+#include "media/gpu/android_video_decode_accelerator.h"
 
 #include <stdint.h>
 
@@ -12,11 +12,11 @@
 #include "base/bind.h"
 #include "base/logging.h"
 #include "base/message_loop/message_loop.h"
-#include "content/common/gpu/media/android_copying_backing_strategy.h"
-#include "content/common/gpu/media/android_video_decode_accelerator.h"
 #include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
 #include "media/base/android/media_codec_util.h"
 #include "media/base/android/media_jni_registrar.h"
+#include "media/gpu/android_copying_backing_strategy.h"
+#include "media/gpu/android_video_decode_accelerator.h"
 #include "media/video/picture.h"
 #include "media/video/video_decode_accelerator.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -35,7 +35,7 @@
 
 }  // namespace
 
-namespace content {
+namespace media {
 
 class MockVideoDecodeAcceleratorClient
     : public media::VideoDecodeAccelerator::Client {
@@ -104,9 +104,9 @@
   EXPECT_TRUE(Configure(media::kCodecVP8));
 }
 
-}  // namespace content
+}  // namespace media
 
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 }
diff --git a/content/common/gpu/media/android_video_encode_accelerator.cc b/media/gpu/android_video_encode_accelerator.cc
similarity index 84%
rename from content/common/gpu/media/android_video_encode_accelerator.cc
rename to media/gpu/android_video_encode_accelerator.cc
index 9a636b9..65700f6 100644
--- a/content/common/gpu/media/android_video_encode_accelerator.cc
+++ b/media/gpu/android_video_encode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/android_video_encode_accelerator.h"
+#include "media/gpu/android_video_encode_accelerator.h"
 
 #include <memory>
 #include <set>
@@ -11,12 +11,12 @@
 #include "base/logging.h"
 #include "base/message_loop/message_loop.h"
 #include "base/metrics/histogram.h"
-#include "content/common/gpu/media/shared_memory_region.h"
 #include "gpu/command_buffer/service/gles2_cmd_decoder.h"
 #include "gpu/ipc/service/gpu_channel.h"
 #include "media/base/android/media_codec_util.h"
 #include "media/base/bitstream_buffer.h"
 #include "media/base/limits.h"
+#include "media/gpu/shared_memory_region.h"
 #include "media/video/picture.h"
 #include "third_party/libyuv/include/libyuv/convert_from.h"
 #include "ui/gl/android/scoped_java_surface.h"
@@ -25,7 +25,7 @@
 using media::VideoCodecBridge;
 using media::VideoFrame;
 
-namespace content {
+namespace media {
 
 // Limit default max video codec size for Android to avoid
 // HW codec initialization failure for resolution higher than 720p.
@@ -94,8 +94,7 @@
 }
 
 AndroidVideoEncodeAccelerator::AndroidVideoEncodeAccelerator()
-    : num_buffers_at_codec_(0),
-      last_set_bitrate_(0) {}
+    : num_buffers_at_codec_(0), last_set_bitrate_(0) {}
 
 AndroidVideoEncodeAccelerator::~AndroidVideoEncodeAccelerator() {
   DCHECK(thread_checker_.CalledOnValidThread());
@@ -106,13 +105,11 @@
   SupportedProfiles profiles;
 
   const struct {
-      const media::VideoCodec codec;
-      const media::VideoCodecProfile profile;
-  } kSupportedCodecs[] = {
-      { media::kCodecVP8, media::VP8PROFILE_ANY },
-      { media::kCodecH264, media::H264PROFILE_BASELINE },
-      { media::kCodecH264, media::H264PROFILE_MAIN }
-  };
+    const media::VideoCodec codec;
+    const media::VideoCodecProfile profile;
+  } kSupportedCodecs[] = {{media::kCodecVP8, media::VP8PROFILE_ANY},
+                          {media::kCodecH264, media::H264PROFILE_BASELINE},
+                          {media::kCodecH264, media::H264PROFILE_MAIN}};
 
   for (const auto& supported_codec : kSupportedCodecs) {
     if (supported_codec.codec == media::kCodecVP8 &&
@@ -129,8 +126,7 @@
     profile.profile = supported_codec.profile;
     // It would be nice if MediaCodec exposes the maximum capabilities of
     // the encoder. Hard-code some reasonable defaults as workaround.
-    profile.max_resolution.SetSize(kMaxEncodeFrameWidth,
-                                   kMaxEncodeFrameHeight);
+    profile.max_resolution.SetSize(kMaxEncodeFrameWidth, kMaxEncodeFrameHeight);
     profile.max_framerate_numerator = kMaxFramerateNumerator;
     profile.max_framerate_denominator = kMaxFramerateDenominator;
     profiles.push_back(profile);
@@ -193,12 +189,9 @@
     DLOG(ERROR) << "No color format support.";
     return false;
   }
-  media_codec_.reset(media::VideoCodecBridge::CreateEncoder(codec,
-                                                            input_visible_size,
-                                                            initial_bitrate,
-                                                            INITIAL_FRAMERATE,
-                                                            IFRAME_INTERVAL,
-                                                            pixel_format));
+  media_codec_.reset(media::VideoCodecBridge::CreateEncoder(
+      codec, input_visible_size, initial_bitrate, INITIAL_FRAMERATE,
+      IFRAME_INTERVAL, pixel_format));
 
   if (!media_codec_) {
     DLOG(ERROR) << "Failed to create/start the codec: "
@@ -212,19 +205,15 @@
   base::MessageLoop::current()->PostTask(
       FROM_HERE,
       base::Bind(&VideoEncodeAccelerator::Client::RequireBitstreamBuffers,
-                 client_ptr_factory_->GetWeakPtr(),
-                 frame_input_count,
-                 input_visible_size,
-                 output_buffer_capacity));
+                 client_ptr_factory_->GetWeakPtr(), frame_input_count,
+                 input_visible_size, output_buffer_capacity));
   return true;
 }
 
 void AndroidVideoEncodeAccelerator::MaybeStartIOTimer() {
   if (!io_timer_.IsRunning() &&
       (num_buffers_at_codec_ > 0 || !pending_frames_.empty())) {
-    io_timer_.Start(FROM_HERE,
-                    EncodePollDelay(),
-                    this,
+    io_timer_.Start(FROM_HERE, EncodePollDelay(), this,
                     &AndroidVideoEncodeAccelerator::DoIOTask);
   }
 }
@@ -315,8 +304,7 @@
   if (status != media::MEDIA_CODEC_OK) {
     DCHECK(status == media::MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER ||
            status == media::MEDIA_CODEC_ERROR);
-    RETURN_ON_FAILURE(status != media::MEDIA_CODEC_ERROR,
-                      "MediaCodec error",
+    RETURN_ON_FAILURE(status != media::MEDIA_CODEC_ERROR, "MediaCodec error",
                       kPlatformFailureError);
     return;
   }
@@ -352,24 +340,18 @@
   int dst_stride_uv = frame->stride(VideoFrame::kUPlane) * 2;
   // Why NV12?  Because COLOR_FORMAT_YUV420_SEMIPLANAR.  See comment at other
   // mention of that constant.
-  bool converted = !libyuv::I420ToNV12(frame->data(VideoFrame::kYPlane),
-                                       frame->stride(VideoFrame::kYPlane),
-                                       frame->data(VideoFrame::kUPlane),
-                                       frame->stride(VideoFrame::kUPlane),
-                                       frame->data(VideoFrame::kVPlane),
-                                       frame->stride(VideoFrame::kVPlane),
-                                       dst_y,
-                                       dst_stride_y,
-                                       dst_uv,
-                                       dst_stride_uv,
-                                       frame->coded_size().width(),
-                                       frame->coded_size().height());
+  bool converted = !libyuv::I420ToNV12(
+      frame->data(VideoFrame::kYPlane), frame->stride(VideoFrame::kYPlane),
+      frame->data(VideoFrame::kUPlane), frame->stride(VideoFrame::kUPlane),
+      frame->data(VideoFrame::kVPlane), frame->stride(VideoFrame::kVPlane),
+      dst_y, dst_stride_y, dst_uv, dst_stride_uv, frame->coded_size().width(),
+      frame->coded_size().height());
   RETURN_ON_FAILURE(converted, "Failed to I420ToNV12!", kPlatformFailureError);
 
   fake_input_timestamp_ += base::TimeDelta::FromMicroseconds(1);
-  status = media_codec_->QueueInputBuffer(
-      input_buf_index, NULL, queued_size, fake_input_timestamp_);
-  UMA_HISTOGRAM_TIMES("Media.AVEA.InputQueueTime",
+  status = media_codec_->QueueInputBuffer(input_buf_index, NULL, queued_size,
+                                          fake_input_timestamp_);
+  UMA_HISTOGRAM_TIMES("Media.AVEA.InputQueueTime",
                       base::Time::Now() - base::get<2>(input));
   RETURN_ON_FAILURE(status == media::MEDIA_CODEC_OK,
                     "Failed to QueueInputBuffer: " << status,
@@ -434,14 +416,12 @@
   media_codec_->ReleaseOutputBuffer(buf_index, false);
   --num_buffers_at_codec_;
 
   UMA_HISTOGRAM_COUNTS_10000("Media.AVEA.EncodedBufferSizeKB", size / 1024);
   base::MessageLoop::current()->PostTask(
       FROM_HERE,
       base::Bind(&VideoEncodeAccelerator::Client::BitstreamBufferReady,
-                 client_ptr_factory_->GetWeakPtr(),
-                 bitstream_buffer.id(),
-                 size,
+                 client_ptr_factory_->GetWeakPtr(), bitstream_buffer.id(), size,
                  key_frame));
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/android_video_encode_accelerator.h b/media/gpu/android_video_encode_accelerator.h
similarity index 91%
rename from content/common/gpu/media/android_video_encode_accelerator.h
rename to media/gpu/android_video_encode_accelerator.h
index 025814a7..071a161 100644
--- a/content/common/gpu/media/android_video_encode_accelerator.h
+++ b/media/gpu/android_video_encode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_ANDROID_VIDEO_ENCODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_ANDROID_VIDEO_ENCODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_ANDROID_VIDEO_ENCODE_ACCELERATOR_H_
+#define MEDIA_GPU_ANDROID_VIDEO_ENCODE_ACCELERATOR_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -18,22 +18,22 @@
 #include "base/threading/thread_checker.h"
 #include "base/timer/timer.h"
 #include "base/tuple.h"
-#include "content/common/content_export.h"
 #include "media/base/android/sdk_media_codec_bridge.h"
+#include "media/gpu/media_gpu_export.h"
 #include "media/video/video_encode_accelerator.h"
 
 namespace media {
 class BitstreamBuffer;
 }  // namespace media
 
-namespace content {
+namespace media {
 
 // Android-specific implementation of media::VideoEncodeAccelerator, enabling
 // hardware-acceleration of video encoding, based on Android's MediaCodec class
 // (http://developer.android.com/reference/android/media/MediaCodec.html).  This
 // class expects to live and be called on a single thread (the GPU process'
 // ChildThread).
-class CONTENT_EXPORT AndroidVideoEncodeAccelerator
+class MEDIA_GPU_EXPORT AndroidVideoEncodeAccelerator
     : public media::VideoEncodeAccelerator {
  public:
   AndroidVideoEncodeAccelerator();
@@ -110,6 +110,6 @@
   DISALLOW_COPY_AND_ASSIGN(AndroidVideoEncodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_ANDROID_VIDEO_ENCODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_ANDROID_VIDEO_ENCODE_ACCELERATOR_H_
diff --git a/media/gpu/args.gni b/media/gpu/args.gni
new file mode 100644
index 0000000..c1511d9
--- /dev/null
+++ b/media/gpu/args.gni
@@ -0,0 +1,12 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+declare_args() {
+  # Indicates if V4L plugin is used.
+  use_v4lplugin = false
+
+  # Indicates if Video4Linux2 codec is used. This is used for all CrOS
+  # platforms which have v4l2 hardware encoder / decoder.
+  use_v4l2_codec = false
+}
diff --git a/content/common/gpu/media/avda_codec_image.cc b/media/gpu/avda_codec_image.cc
similarity index 97%
rename from content/common/gpu/media/avda_codec_image.cc
rename to media/gpu/avda_codec_image.cc
index d4a30fe3..8fdb6f54 100644
--- a/content/common/gpu/media/avda_codec_image.cc
+++ b/media/gpu/avda_codec_image.cc
@@ -2,23 +2,23 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/avda_codec_image.h"
+#include "media/gpu/avda_codec_image.h"
 
 #include <string.h>
 
 #include <memory>
 
 #include "base/metrics/histogram_macros.h"
-#include "content/common/gpu/media/avda_shared_state.h"
 #include "gpu/command_buffer/service/context_group.h"
 #include "gpu/command_buffer/service/context_state.h"
 #include "gpu/command_buffer/service/gles2_cmd_decoder.h"
 #include "gpu/command_buffer/service/texture_manager.h"
+#include "media/gpu/avda_shared_state.h"
 #include "ui/gl/android/surface_texture.h"
 #include "ui/gl/gl_context.h"
 #include "ui/gl/scoped_make_current.h"
 
-namespace content {
+namespace media {
 
 AVDACodecImage::AVDACodecImage(
     int picture_buffer_id,
@@ -284,4 +284,4 @@
   return codec_buffer_index_ != kInvalidCodecBufferIndex && media_codec_;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/avda_codec_image.h b/media/gpu/avda_codec_image.h
similarity index 95%
rename from content/common/gpu/media/avda_codec_image.h
rename to media/gpu/avda_codec_image.h
index 43506be..317c747 100644
--- a/content/common/gpu/media/avda_codec_image.h
+++ b/media/gpu/avda_codec_image.h
@@ -2,22 +2,22 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_AVDA_CODEC_IMAGE_H_
-#define CONTENT_COMMON_GPU_MEDIA_AVDA_CODEC_IMAGE_H_
+#ifndef MEDIA_GPU_AVDA_CODEC_IMAGE_H_
+#define MEDIA_GPU_AVDA_CODEC_IMAGE_H_
 
 #include <stdint.h>
 
 #include <memory>
 
 #include "base/macros.h"
-#include "content/common/gpu/media/avda_shared_state.h"
 #include "gpu/command_buffer/service/gl_stream_texture_image.h"
+#include "media/gpu/avda_shared_state.h"
 
 namespace ui {
 class ScopedMakeCurrent;
 }
 
-namespace content {
+namespace media {
 
 // GLImage that renders MediaCodec buffers to a SurfaceTexture or SurfaceView as
 // needed in order to draw them.
@@ -165,6 +165,6 @@
   DISALLOW_COPY_AND_ASSIGN(AVDACodecImage);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_AVDA_CODEC_IMAGE_H_
+#endif  // MEDIA_GPU_AVDA_CODEC_IMAGE_H_
diff --git a/content/common/gpu/media/avda_return_on_failure.h b/media/gpu/avda_return_on_failure.h
similarity index 89%
rename from content/common/gpu/media/avda_return_on_failure.h
rename to media/gpu/avda_return_on_failure.h
index e295c9d..b68d9a1 100644
--- a/content/common/gpu/media/avda_return_on_failure.h
+++ b/media/gpu/avda_return_on_failure.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_AVDA_RETURN_ON_FAILURE_H_
-#define CONTENT_COMMON_GPU_MEDIA_AVDA_RETURN_ON_FAILURE_H_
+#ifndef MEDIA_GPU_AVDA_RETURN_ON_FAILURE_H_
+#define MEDIA_GPU_AVDA_RETURN_ON_FAILURE_H_
 
 #include "media/video/video_decode_accelerator.h"
 
@@ -31,4 +31,4 @@
 // Return null if !ptr.
 #define RETURN_NULL_IF_NULL(ptr) RETURN_IF_NULL(ptr, 0)
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_AVDA_RETURN_ON_FAILURE_H_
+#endif  // MEDIA_GPU_AVDA_RETURN_ON_FAILURE_H_
diff --git a/content/common/gpu/media/avda_shared_state.cc b/media/gpu/avda_shared_state.cc
similarity index 92%
rename from content/common/gpu/media/avda_shared_state.cc
rename to media/gpu/avda_shared_state.cc
index f55a3b02..f1ee105 100644
--- a/content/common/gpu/media/avda_shared_state.cc
+++ b/media/gpu/avda_shared_state.cc
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/avda_shared_state.h"
+#include "media/gpu/avda_shared_state.h"
 
 #include "base/time/time.h"
-#include "content/common/gpu/media/avda_codec_image.h"
+#include "media/gpu/avda_codec_image.h"
 #include "ui/gl/gl_bindings.h"
 #include "ui/gl/scoped_make_current.h"
 
-namespace content {
+namespace media {
 
 AVDASharedState::AVDASharedState()
     : surface_texture_service_id_(0),
@@ -67,4 +67,4 @@
   return it == codec_images_.end() ? nullptr : it->second;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/avda_shared_state.h b/media/gpu/avda_shared_state.h
similarity index 98%
rename from content/common/gpu/media/avda_shared_state.h
rename to media/gpu/avda_shared_state.h
index 79534f2..c1e01a1 100644
--- a/content/common/gpu/media/avda_shared_state.h
+++ b/media/gpu/avda_shared_state.h
@@ -18,11 +18,9 @@
 }
 
 namespace media {
-class MediaCodecBridge;
-}
 
-namespace content {
 class AVDACodecImage;
+class MediaCodecBridge;
 
 // Shared state to allow communication between the AVDA and the
 // GLImages that configure GL for drawing the frames.
@@ -107,6 +105,6 @@
   DISALLOW_COPY_AND_ASSIGN(AVDASharedState);
 };
 
-}  // namespace content
+}  // namespace media
 
 #endif  // CONTENT_COMMON_GPU_AVDA_SHARED_STATE_H_
diff --git a/content/common/gpu/media/avda_state_provider.h b/media/gpu/avda_state_provider.h
similarity index 94%
rename from content/common/gpu/media/avda_state_provider.h
rename to media/gpu/avda_state_provider.h
index e7dfac6..742403d3 100644
--- a/content/common/gpu/media/avda_state_provider.h
+++ b/media/gpu/avda_state_provider.h
@@ -7,8 +7,8 @@
 
 #include "base/compiler_specific.h"
 #include "base/threading/thread_checker.h"
-#include "content/common/content_export.h"
 #include "gpu/command_buffer/service/texture_manager.h"
+#include "media/gpu/media_gpu_export.h"
 #include "media/video/video_decode_accelerator.h"
 
 namespace gfx {
@@ -25,7 +25,7 @@
 class VideoCodecBridge;
 }
 
-namespace content {
+namespace media {
 
 // Helper class that provides the BackingStrategy with enough state
 // to do useful work.
@@ -47,6 +47,6 @@
                          media::VideoDecodeAccelerator::Error error) = 0;
 };
 
-}  // namespace content
+}  // namespace media
 
 #endif  // CONTENT_COMMON_GPU_MEDIA_ANDROID_VIDEO_DECODE_ACCELERATOR_STATE_PROVIDER_H_
diff --git a/content/common/gpu/media/avda_surface_tracker.cc b/media/gpu/avda_surface_tracker.cc
similarity index 91%
rename from content/common/gpu/media/avda_surface_tracker.cc
rename to media/gpu/avda_surface_tracker.cc
index fbc49ea..8638f5c 100644
--- a/content/common/gpu/media/avda_surface_tracker.cc
+++ b/media/gpu/avda_surface_tracker.cc
@@ -2,13 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/avda_surface_tracker.h"
+#include "media/gpu/avda_surface_tracker.h"
 
 #include "base/callback.h"
 #include "base/lazy_instance.h"
 #include "base/threading/thread_checker.h"
 
-namespace content {
+namespace media {
 
 namespace {
 static base::LazyInstance<AVDASurfaceTracker> g_lazy_instance =
@@ -46,4 +46,4 @@
   return g_lazy_instance.Pointer();
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/avda_surface_tracker.h b/media/gpu/avda_surface_tracker.h
similarity index 83%
rename from content/common/gpu/media/avda_surface_tracker.h
rename to media/gpu/avda_surface_tracker.h
index b1d5258d..0f35ff6 100644
--- a/content/common/gpu/media/avda_surface_tracker.h
+++ b/media/gpu/avda_surface_tracker.h
@@ -2,16 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_AVDA_SURFACE_TRACKER_H_
-#define CONTENT_COMMON_GPU_MEDIA_AVDA_SURFACE_TRACKER_H_
+#ifndef MEDIA_GPU_AVDA_SURFACE_TRACKER_H_
+#define MEDIA_GPU_AVDA_SURFACE_TRACKER_H_
 
 #include <vector>
 
 #include "base/callback.h"
 #include "base/threading/thread_checker.h"
-#include "content/common/content_export.h"
+#include "media/gpu/media_gpu_export.h"
 
-namespace content {
+namespace media {
 
 using >
 
@@ -22,7 +22,7 @@
 // onSurfaceDestroyed().
 // This is not thread safe. All access should be on the gpu main thread.
 // Callbacks will be run on the same thread.
-class AVDASurfaceTracker {
+class MEDIA_GPU_EXPORT AVDASurfaceTracker {
  public:
   AVDASurfaceTracker();
   ~AVDASurfaceTracker();
@@ -47,6 +47,6 @@
   DISALLOW_COPY_AND_ASSIGN(AVDASurfaceTracker);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_AVDA_SURFACE_TRACKER_H_
+#endif  // MEDIA_GPU_AVDA_SURFACE_TRACKER_H_
diff --git a/content/common/gpu/media/dxva_video_decode_accelerator_win.cc b/media/gpu/dxva_video_decode_accelerator_win.cc
similarity index 79%
rename from content/common/gpu/media/dxva_video_decode_accelerator_win.cc
rename to media/gpu/dxva_video_decode_accelerator_win.cc
index 2f27010..88b5f56 100644
--- a/content/common/gpu/media/dxva_video_decode_accelerator_win.cc
+++ b/media/gpu/dxva_video_decode_accelerator_win.cc
@@ -2,13 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/dxva_video_decode_accelerator_win.h"
+#include "media/gpu/dxva_video_decode_accelerator_win.h"
 
 #include <memory>
 
 #if !defined(OS_WIN)
 #error This file should only be built on Windows.
-#endif   // !defined(OS_WIN)
+#endif  // !defined(OS_WIN)
 
 #include <codecapi.h>
 #include <dxgi1_2.h>
@@ -50,49 +50,45 @@
 
 const wchar_t kVP8DecoderDLLName[] =
 #if defined(ARCH_CPU_X86)
-  L"mfx_mft_vp8vd_32.dll";
+    L"mfx_mft_vp8vd_32.dll";
 #elif defined(ARCH_CPU_X86_64)
-  L"mfx_mft_vp8vd_64.dll";
+    L"mfx_mft_vp8vd_64.dll";
 #else
 #error Unsupported Windows CPU Architecture
 #endif
 
 const wchar_t kVP9DecoderDLLName[] =
 #if defined(ARCH_CPU_X86)
-  L"mfx_mft_vp9vd_32.dll";
+    L"mfx_mft_vp9vd_32.dll";
 #elif defined(ARCH_CPU_X86_64)
-  L"mfx_mft_vp9vd_64.dll";
+    L"mfx_mft_vp9vd_64.dll";
 #else
 #error Unsupported Windows CPU Architecture
 #endif
 
 const CLSID CLSID_WebmMfVp8Dec = {
-  0x451e3cb7,
-  0x2622,
-  0x4ba5,
-  { 0x8e, 0x1d, 0x44, 0xb3, 0xc4, 0x1d, 0x09, 0x24 }
-};
+    0x451e3cb7,
+    0x2622,
+    0x4ba5,
+    {0x8e, 0x1d, 0x44, 0xb3, 0xc4, 0x1d, 0x09, 0x24}};
 
 const CLSID CLSID_WebmMfVp9Dec = {
-  0x07ab4bd2,
-  0x1979,
-  0x4fcd,
-  { 0xa6, 0x97, 0xdf, 0x9a, 0xd1, 0x5b, 0x34, 0xfe }
-};
+    0x07ab4bd2,
+    0x1979,
+    0x4fcd,
+    {0xa6, 0x97, 0xdf, 0x9a, 0xd1, 0x5b, 0x34, 0xfe}};
 
 const CLSID MEDIASUBTYPE_VP80 = {
-  0x30385056,
-  0x0000,
-  0x0010,
-  { 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }
-};
+    0x30385056,
+    0x0000,
+    0x0010,
+    {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
 
 const CLSID MEDIASUBTYPE_VP90 = {
-  0x30395056,
-  0x0000,
-  0x0010,
-  { 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }
-};
+    0x30395056,
+    0x0000,
+    0x0010,
+    {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
 
 // The CLSID of the video processor media foundation transform which we use for
 // texture color conversion in DX11.
@@ -100,8 +96,17 @@
 // to detect which SDK we are compiling with.
 #if VER_PRODUCTBUILD < 10011  // VER_PRODUCTBUILD for 10.0.10158.0 SDK.
 DEFINE_GUID(CLSID_VideoProcessorMFT,
-            0x88753b26, 0x5b24, 0x49bd, 0xb2, 0xe7, 0xc, 0x44, 0x5c, 0x78,
-            0xc9, 0x82);
+            0x88753b26,
+            0x5b24,
+            0x49bd,
+            0xb2,
+            0xe7,
+            0xc,
+            0x44,
+            0x5c,
+            0x78,
+            0xc9,
+            0x82);
 #endif
 
 // MF_XVP_PLAYBACK_MODE
@@ -109,57 +114,69 @@
 // If this attribute is TRUE, the video processor will run in playback mode
 // where it allows callers to allocate output samples and allows last frame
 // regeneration (repaint).
-DEFINE_GUID(MF_XVP_PLAYBACK_MODE, 0x3c5d293f, 0xad67, 0x4e29, 0xaf, 0x12,
-            0xcf, 0x3e, 0x23, 0x8a, 0xcc, 0xe9);
+DEFINE_GUID(MF_XVP_PLAYBACK_MODE,
+            0x3c5d293f,
+            0xad67,
+            0x4e29,
+            0xaf,
+            0x12,
+            0xcf,
+            0x3e,
+            0x23,
+            0x8a,
+            0xcc,
+            0xe9);
 
 // Defines the GUID for the Intel H264 DXVA device.
 static const GUID DXVA2_Intel_ModeH264_E = {
-  0x604F8E68, 0x4951, 0x4c54,{ 0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6}
-};
+    0x604F8E68,
+    0x4951,
+    0x4c54,
+    {0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6}};
 
 // R600, R700, Evergreen and Cayman AMD cards. These support DXVA via UVD3
 // or earlier, and don't handle resolutions higher than 1920 x 1088 well.
 static const DWORD g_AMDUVD3GPUList[] = {
-  0x9400, 0x9401, 0x9402, 0x9403, 0x9405, 0x940a, 0x940b, 0x940f, 0x94c0,
-  0x94c1, 0x94c3, 0x94c4, 0x94c5, 0x94c6, 0x94c7, 0x94c8, 0x94c9, 0x94cb,
-  0x94cc, 0x94cd, 0x9580, 0x9581, 0x9583, 0x9586, 0x9587, 0x9588, 0x9589,
-  0x958a, 0x958b, 0x958c, 0x958d, 0x958e, 0x958f, 0x9500, 0x9501, 0x9504,
-  0x9505, 0x9506, 0x9507, 0x9508, 0x9509, 0x950f, 0x9511, 0x9515, 0x9517,
-  0x9519, 0x95c0, 0x95c2, 0x95c4, 0x95c5, 0x95c6, 0x95c7, 0x95c9, 0x95cc,
-  0x95cd, 0x95ce, 0x95cf, 0x9590, 0x9591, 0x9593, 0x9595, 0x9596, 0x9597,
-  0x9598, 0x9599, 0x959b, 0x9610, 0x9611, 0x9612, 0x9613, 0x9614, 0x9615,
-  0x9616, 0x9710, 0x9711, 0x9712, 0x9713, 0x9714, 0x9715, 0x9440, 0x9441,
-  0x9442, 0x9443, 0x9444, 0x9446, 0x944a, 0x944b, 0x944c, 0x944e, 0x9450,
-  0x9452, 0x9456, 0x945a, 0x945b, 0x945e, 0x9460, 0x9462, 0x946a, 0x946b,
-  0x947a, 0x947b, 0x9480, 0x9487, 0x9488, 0x9489, 0x948a, 0x948f, 0x9490,
-  0x9491, 0x9495, 0x9498, 0x949c, 0x949e, 0x949f, 0x9540, 0x9541, 0x9542,
-  0x954e, 0x954f, 0x9552, 0x9553, 0x9555, 0x9557, 0x955f, 0x94a0, 0x94a1,
-  0x94a3, 0x94b1, 0x94b3, 0x94b4, 0x94b5, 0x94b9, 0x68e0, 0x68e1, 0x68e4,
-  0x68e5, 0x68e8, 0x68e9, 0x68f1, 0x68f2, 0x68f8, 0x68f9, 0x68fa, 0x68fe,
-  0x68c0, 0x68c1, 0x68c7, 0x68c8, 0x68c9, 0x68d8, 0x68d9, 0x68da, 0x68de,
-  0x68a0, 0x68a1, 0x68a8, 0x68a9, 0x68b0, 0x68b8, 0x68b9, 0x68ba, 0x68be,
-  0x68bf, 0x6880, 0x6888, 0x6889, 0x688a, 0x688c, 0x688d, 0x6898, 0x6899,
-  0x689b, 0x689e, 0x689c, 0x689d, 0x9802, 0x9803, 0x9804, 0x9805, 0x9806,
-  0x9807, 0x9808, 0x9809, 0x980a, 0x9640, 0x9641, 0x9647, 0x9648, 0x964a,
-  0x964b, 0x964c, 0x964e, 0x964f, 0x9642, 0x9643, 0x9644, 0x9645, 0x9649,
-  0x6720, 0x6721, 0x6722, 0x6723, 0x6724, 0x6725, 0x6726, 0x6727, 0x6728,
-  0x6729, 0x6738, 0x6739, 0x673e, 0x6740, 0x6741, 0x6742, 0x6743, 0x6744,
-  0x6745, 0x6746, 0x6747, 0x6748, 0x6749, 0x674a, 0x6750, 0x6751, 0x6758,
-  0x6759, 0x675b, 0x675d, 0x675f, 0x6840, 0x6841, 0x6842, 0x6843, 0x6849,
-  0x6850, 0x6858, 0x6859, 0x6760, 0x6761, 0x6762, 0x6763, 0x6764, 0x6765,
-  0x6766, 0x6767, 0x6768, 0x6770, 0x6771, 0x6772, 0x6778, 0x6779, 0x677b,
-  0x6700, 0x6701, 0x6702, 0x6703, 0x6704, 0x6705, 0x6706, 0x6707, 0x6708,
-  0x6709, 0x6718, 0x6719, 0x671c, 0x671d, 0x671f, 0x683D, 0x9900, 0x9901,
-  0x9903, 0x9904, 0x9905, 0x9906, 0x9907, 0x9908, 0x9909, 0x990a, 0x990b,
-  0x990c, 0x990d, 0x990e, 0x990f, 0x9910, 0x9913, 0x9917, 0x9918, 0x9919,
-  0x9990, 0x9991, 0x9992, 0x9993, 0x9994, 0x9995, 0x9996, 0x9997, 0x9998,
-  0x9999, 0x999a, 0x999b, 0x999c, 0x999d, 0x99a0, 0x99a2, 0x99a4,
+    0x9400, 0x9401, 0x9402, 0x9403, 0x9405, 0x940a, 0x940b, 0x940f, 0x94c0,
+    0x94c1, 0x94c3, 0x94c4, 0x94c5, 0x94c6, 0x94c7, 0x94c8, 0x94c9, 0x94cb,
+    0x94cc, 0x94cd, 0x9580, 0x9581, 0x9583, 0x9586, 0x9587, 0x9588, 0x9589,
+    0x958a, 0x958b, 0x958c, 0x958d, 0x958e, 0x958f, 0x9500, 0x9501, 0x9504,
+    0x9505, 0x9506, 0x9507, 0x9508, 0x9509, 0x950f, 0x9511, 0x9515, 0x9517,
+    0x9519, 0x95c0, 0x95c2, 0x95c4, 0x95c5, 0x95c6, 0x95c7, 0x95c9, 0x95cc,
+    0x95cd, 0x95ce, 0x95cf, 0x9590, 0x9591, 0x9593, 0x9595, 0x9596, 0x9597,
+    0x9598, 0x9599, 0x959b, 0x9610, 0x9611, 0x9612, 0x9613, 0x9614, 0x9615,
+    0x9616, 0x9710, 0x9711, 0x9712, 0x9713, 0x9714, 0x9715, 0x9440, 0x9441,
+    0x9442, 0x9443, 0x9444, 0x9446, 0x944a, 0x944b, 0x944c, 0x944e, 0x9450,
+    0x9452, 0x9456, 0x945a, 0x945b, 0x945e, 0x9460, 0x9462, 0x946a, 0x946b,
+    0x947a, 0x947b, 0x9480, 0x9487, 0x9488, 0x9489, 0x948a, 0x948f, 0x9490,
+    0x9491, 0x9495, 0x9498, 0x949c, 0x949e, 0x949f, 0x9540, 0x9541, 0x9542,
+    0x954e, 0x954f, 0x9552, 0x9553, 0x9555, 0x9557, 0x955f, 0x94a0, 0x94a1,
+    0x94a3, 0x94b1, 0x94b3, 0x94b4, 0x94b5, 0x94b9, 0x68e0, 0x68e1, 0x68e4,
+    0x68e5, 0x68e8, 0x68e9, 0x68f1, 0x68f2, 0x68f8, 0x68f9, 0x68fa, 0x68fe,
+    0x68c0, 0x68c1, 0x68c7, 0x68c8, 0x68c9, 0x68d8, 0x68d9, 0x68da, 0x68de,
+    0x68a0, 0x68a1, 0x68a8, 0x68a9, 0x68b0, 0x68b8, 0x68b9, 0x68ba, 0x68be,
+    0x68bf, 0x6880, 0x6888, 0x6889, 0x688a, 0x688c, 0x688d, 0x6898, 0x6899,
+    0x689b, 0x689e, 0x689c, 0x689d, 0x9802, 0x9803, 0x9804, 0x9805, 0x9806,
+    0x9807, 0x9808, 0x9809, 0x980a, 0x9640, 0x9641, 0x9647, 0x9648, 0x964a,
+    0x964b, 0x964c, 0x964e, 0x964f, 0x9642, 0x9643, 0x9644, 0x9645, 0x9649,
+    0x6720, 0x6721, 0x6722, 0x6723, 0x6724, 0x6725, 0x6726, 0x6727, 0x6728,
+    0x6729, 0x6738, 0x6739, 0x673e, 0x6740, 0x6741, 0x6742, 0x6743, 0x6744,
+    0x6745, 0x6746, 0x6747, 0x6748, 0x6749, 0x674a, 0x6750, 0x6751, 0x6758,
+    0x6759, 0x675b, 0x675d, 0x675f, 0x6840, 0x6841, 0x6842, 0x6843, 0x6849,
+    0x6850, 0x6858, 0x6859, 0x6760, 0x6761, 0x6762, 0x6763, 0x6764, 0x6765,
+    0x6766, 0x6767, 0x6768, 0x6770, 0x6771, 0x6772, 0x6778, 0x6779, 0x677b,
+    0x6700, 0x6701, 0x6702, 0x6703, 0x6704, 0x6705, 0x6706, 0x6707, 0x6708,
+    0x6709, 0x6718, 0x6719, 0x671c, 0x671d, 0x671f, 0x683D, 0x9900, 0x9901,
+    0x9903, 0x9904, 0x9905, 0x9906, 0x9907, 0x9908, 0x9909, 0x990a, 0x990b,
+    0x990c, 0x990d, 0x990e, 0x990f, 0x9910, 0x9913, 0x9917, 0x9918, 0x9919,
+    0x9990, 0x9991, 0x9992, 0x9993, 0x9994, 0x9995, 0x9996, 0x9997, 0x9998,
+    0x9999, 0x999a, 0x999b, 0x999c, 0x999d, 0x99a0, 0x99a2, 0x99a4,
 };
 
 // Legacy Intel GPUs (Second generation) which have trouble with resolutions
 // higher than 1920 x 1088
 static const DWORD g_IntelLegacyGPUList[] = {
-  0x102, 0x106, 0x116, 0x126,
+    0x102, 0x106, 0x116, 0x126,
 };
 
 // Provides scoped access to the underlying buffer in an IMFMediaBuffer
@@ -180,13 +197,9 @@
     CHECK(SUCCEEDED(hr));
   }
 
-  uint8_t* get() {
-    return buffer_;
-  }
+  uint8_t* get() { return buffer_; }
 
-  DWORD current_length() const {
-    return current_length_;
-  }
+  DWORD current_length() const { return current_length_; }
 
  private:
   base::win::ScopedComPtr<IMFMediaBuffer> media_buffer_;
@@ -199,46 +212,40 @@
 
 }  // namespace
 
-namespace content {
+namespace media {
 
 static const media::VideoCodecProfile kSupportedProfiles[] = {
-  media::H264PROFILE_BASELINE,
-  media::H264PROFILE_MAIN,
-  media::H264PROFILE_HIGH,
-  media::VP8PROFILE_ANY,
-  media::VP9PROFILE_PROFILE0,
-  media::VP9PROFILE_PROFILE1,
-  media::VP9PROFILE_PROFILE2,
-  media::VP9PROFILE_PROFILE3
-};
+    media::H264PROFILE_BASELINE, media::H264PROFILE_MAIN,
+    media::H264PROFILE_HIGH,     media::VP8PROFILE_ANY,
+    media::VP9PROFILE_PROFILE0,  media::VP9PROFILE_PROFILE1,
+    media::VP9PROFILE_PROFILE2,  media::VP9PROFILE_PROFILE3};
 
-CreateDXGIDeviceManager DXVAVideoDecodeAccelerator::create_dxgi_device_manager_
-    = NULL;
+CreateDXGIDeviceManager
+    DXVAVideoDecodeAccelerator::create_dxgi_device_manager_ = NULL;
 
-#define RETURN_ON_FAILURE(result, log, ret)  \
-  do {                                       \
-    if (!(result)) {                         \
-      DLOG(ERROR) << log;                    \
-      return ret;                            \
-    }                                        \
+#define RETURN_ON_FAILURE(result, log, ret) \
+  do {                                      \
+    if (!(result)) {                        \
+      DLOG(ERROR) << log;                   \
+      return ret;                           \
+    }                                       \
   } while (0)
 
-#define RETURN_ON_HR_FAILURE(result, log, ret)                    \
-  RETURN_ON_FAILURE(SUCCEEDED(result),                            \
-                    log << ", HRESULT: 0x" << std::hex << result, \
-                    ret);
+#define RETURN_ON_HR_FAILURE(result, log, ret) \
+  RETURN_ON_FAILURE(SUCCEEDED(result),         \
+                    log << ", HRESULT: 0x" << std::hex << result, ret);
 
-#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret)  \
-  do {                                                              \
-    if (!(result)) {                                                \
-      DVLOG(1) << log;                                              \
-      StopOnError(error_code);                                      \
-      return ret;                                                   \
-    }                                                               \
+#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
+  do {                                                             \
+    if (!(result)) {                                               \
+      DVLOG(1) << log;                                             \
+      StopOnError(error_code);                                     \
+      return ret;                                                  \
+    }                                                              \
   } while (0)
 
-#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret)  \
-  RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result),                      \
+#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret)        \
+  RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result),                            \
                                log << ", HRESULT: 0x" << std::hex << result, \
                                error_code, ret);
 
@@ -284,9 +291,8 @@
     // with the align argument being 0.
     hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
   } else {
-    hr = MFCreateAlignedMemoryBuffer(buffer_length,
-                                     align - 1,
-                                     buffer.Receive());
+    hr =
+        MFCreateAlignedMemoryBuffer(buffer_length, align - 1, buffer.Receive());
   }
   RETURN_ON_HR_FAILURE(hr, "Failed to create memory buffer for sample", NULL);
 
@@ -309,8 +315,8 @@
   CHECK(stream);
   CHECK_GT(size, 0U);
   base::win::ScopedComPtr<IMFSample> sample;
-  sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
-                                            alignment));
+  sample.Attach(
+      CreateEmptySampleWithBuffer(std::max(min_size, size), alignment));
   RETURN_ON_FAILURE(sample.get(), "Failed to create empty sample", NULL);
 
   base::win::ScopedComPtr<IMFMediaBuffer> buffer;
@@ -340,24 +346,24 @@
 // is to use the CoCreateInstance API which requires the COM apartment to be
 // initialized which is not the case on the GPU main thread. We want to avoid
 // initializing COM as it may have sideeffects.
-HRESULT CreateCOMObjectFromDll(HMODULE dll, const CLSID& clsid, const IID& iid,
+HRESULT CreateCOMObjectFromDll(HMODULE dll,
+                               const CLSID& clsid,
+                               const IID& iid,
                                void** object) {
   if (!dll || !object)
     return E_INVALIDARG;
 
-  using GetClassObject = HRESULT (WINAPI*)(
-      const CLSID& clsid, const IID& iid, void** object);
+  using GetClassObject =
+      HRESULT(WINAPI*)(const CLSID& clsid, const IID& iid, void** object);
 
   GetClassObject get_class_object = reinterpret_cast<GetClassObject>(
       GetProcAddress(dll, "DllGetClassObject"));
-  RETURN_ON_FAILURE(
-      get_class_object, "Failed to get DllGetClassObject pointer", E_FAIL);
+  RETURN_ON_FAILURE(get_class_object, "Failed to get DllGetClassObject pointer",
+                    E_FAIL);
 
   base::win::ScopedComPtr<IClassFactory> factory;
-  HRESULT hr = get_class_object(
-      clsid,
-      __uuidof(IClassFactory),
-      factory.ReceiveVoid());
+  HRESULT hr =
+      get_class_object(clsid, __uuidof(IClassFactory), factory.ReceiveVoid());
   RETURN_ON_HR_FAILURE(hr, "DllGetClassObject failed", hr);
 
   hr = factory->CreateInstance(NULL, iid, object);
@@ -367,7 +373,7 @@
 // Helper function to query the ANGLE device object. The template argument T
 // identifies the device interface being queried. IDirect3DDevice9Ex for d3d9
 // and ID3D11Device for dx11.
-template<class T>
+template <class T>
 base::win::ScopedComPtr<T> QueryDeviceObjectFromANGLE(int object_type) {
   base::win::ScopedComPtr<T> device_object;
 
@@ -380,19 +386,16 @@
     egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
   }
 
-  RETURN_ON_FAILURE(
-      gfx::GLSurfaceEGL::HasEGLExtension("EGL_EXT_device_query"),
-      "EGL_EXT_device_query missing",
-      device_object);
+  RETURN_ON_FAILURE(gfx::GLSurfaceEGL::HasEGLExtension("EGL_EXT_device_query"),
+                    "EGL_EXT_device_query missing", device_object);
 
   PFNEGLQUERYDISPLAYATTRIBEXTPROC QueryDisplayAttribEXT = nullptr;
 
   {
     TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress");
 
-    QueryDisplayAttribEXT =
-        reinterpret_cast<PFNEGLQUERYDISPLAYATTRIBEXTPROC>(eglGetProcAddress(
-            "eglQueryDisplayAttribEXT"));
+    QueryDisplayAttribEXT = reinterpret_cast<PFNEGLQUERYDISPLAYATTRIBEXTPROC>(
+        eglGetProcAddress("eglQueryDisplayAttribEXT"));
 
     RETURN_ON_FAILURE(
         QueryDisplayAttribEXT,
@@ -405,9 +408,8 @@
   {
     TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress");
 
-    QueryDeviceAttribEXT =
-        reinterpret_cast<PFNEGLQUERYDEVICEATTRIBEXTPROC>(eglGetProcAddress(
-            "eglQueryDeviceAttribEXT"));
+    QueryDeviceAttribEXT = reinterpret_cast<PFNEGLQUERYDEVICEATTRIBEXTPROC>(
+        eglGetProcAddress("eglQueryDeviceAttribEXT"));
 
     RETURN_ON_FAILURE(
         QueryDeviceAttribEXT,
@@ -424,19 +426,16 @@
         device_object);
   }
 
-  RETURN_ON_FAILURE(
-      egl_device,
-      "Failed to get the EGL device",
-      device_object);
+  RETURN_ON_FAILURE(egl_device, "Failed to get the EGL device", device_object);
 
   {
     TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDisplayAttribEXT");
 
     RETURN_ON_FAILURE(
-        QueryDeviceAttribEXT(
-            reinterpret_cast<EGLDeviceEXT>(egl_device), object_type, &device),
-            "The eglQueryDeviceAttribEXT function failed to get the device",
-            device_object);
+        QueryDeviceAttribEXT(reinterpret_cast<EGLDeviceEXT>(egl_device),
+                             object_type, &device),
+        "The eglQueryDeviceAttribEXT function failed to get the device",
+        device_object);
 
     RETURN_ON_FAILURE(device, "Failed to get the ANGLE device", device_object);
   }
@@ -449,11 +448,9 @@
     : last_sps_id_(0),
       last_pps_id_(0),
       config_changed_(false),
-      pending_config_changed_(false) {
-}
+      pending_config_changed_(false) {}
 
-H264ConfigChangeDetector::~H264ConfigChangeDetector() {
-}
+H264ConfigChangeDetector::~H264ConfigChangeDetector() {}
 
 bool H264ConfigChangeDetector::DetectConfig(const uint8_t* stream,
                                             unsigned int size) {
@@ -572,27 +569,18 @@
   // Copies the output sample data to the picture buffer provided by the
   // client.
   // The dest_surface parameter contains the decoded bits.
-  bool CopyOutputSampleDataToPictureBuffer(
-      DXVAVideoDecodeAccelerator* decoder,
-      IDirect3DSurface9* dest_surface,
-      ID3D11Texture2D* dx11_texture,
-      int input_buffer_id);
+  bool CopyOutputSampleDataToPictureBuffer(DXVAVideoDecodeAccelerator* decoder,
+                                           IDirect3DSurface9* dest_surface,
+                                           ID3D11Texture2D* dx11_texture,
+                                           int input_buffer_id);
 
-  bool available() const {
-    return available_;
-  }
+  bool available() const { return available_; }
 
-  void set_available(bool available) {
-    available_ = available;
-  }
+  void set_available(bool available) { available_ = available; }
 
-  int id() const {
-    return picture_buffer_.id();
-  }
+  int id() const { return picture_buffer_.id(); }
 
-  gfx::Size size() const {
-    return picture_buffer_.size();
-  }
+  gfx::Size size() const { return picture_buffer_.size(); }
 
   bool waiting_to_reuse() const { return waiting_to_reuse_; }
 
@@ -661,13 +649,15 @@
   if (!picture_buffer->InitializeTexture(decoder, !!use_rgb))
     return linked_ptr<DXVAPictureBuffer>(nullptr);
 
-  EGLint attrib_list[] = {
-    EGL_WIDTH, buffer.size().width(),
-    EGL_HEIGHT, buffer.size().height(),
-    EGL_TEXTURE_FORMAT, use_rgb ? EGL_TEXTURE_RGB : EGL_TEXTURE_RGBA,
-    EGL_TEXTURE_TARGET, EGL_TEXTURE_2D,
-    EGL_NONE
-  };
+  EGLint attrib_list[] = {EGL_WIDTH,
+                          buffer.size().width(),
+                          EGL_HEIGHT,
+                          buffer.size().height(),
+                          EGL_TEXTURE_FORMAT,
+                          use_rgb ? EGL_TEXTURE_RGB : EGL_TEXTURE_RGBA,
+                          EGL_TEXTURE_TARGET,
+                          EGL_TEXTURE_2D,
+                          EGL_NONE};
 
   picture_buffer->decoding_surface_ = eglCreatePbufferFromClientBuffer(
       egl_display, EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE,
@@ -752,14 +742,9 @@
   if (decoding_surface_) {
     EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
 
-    eglReleaseTexImage(
-        egl_display,
-        decoding_surface_,
-        EGL_BACK_BUFFER);
+    eglReleaseTexImage(egl_display, decoding_surface_, EGL_BACK_BUFFER);
 
-    eglDestroySurface(
-        egl_display,
-        decoding_surface_);
+    eglDestroySurface(egl_display, decoding_surface_);
     decoding_surface_ = NULL;
   }
 }
@@ -767,10 +752,7 @@
 bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
   DCHECK(decoding_surface_);
   EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
-  eglReleaseTexImage(
-    egl_display,
-    decoding_surface_,
-    EGL_BACK_BUFFER);
+  eglReleaseTexImage(egl_display, decoding_surface_, EGL_BACK_BUFFER);
   decoder_surface_.Release();
   target_surface_.Release();
   decoder_dx11_texture_.Release();
@@ -792,11 +774,10 @@
 }
 
 bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
-    CopyOutputSampleDataToPictureBuffer(
-        DXVAVideoDecodeAccelerator* decoder,
-        IDirect3DSurface9* dest_surface,
-        ID3D11Texture2D* dx11_texture,
-        int input_buffer_id) {
+    CopyOutputSampleDataToPictureBuffer(DXVAVideoDecodeAccelerator* decoder,
+                                        IDirect3DSurface9* dest_surface,
+                                        ID3D11Texture2D* dx11_texture,
+                                        int input_buffer_id) {
   DCHECK(dest_surface || dx11_texture);
   if (dx11_texture) {
     // Grab a reference on the decoder texture. This reference will be released
@@ -874,10 +855,7 @@
   }
 
   EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
-  eglBindTexImage(
-      egl_display,
-      decoding_surface_,
-      EGL_BACK_BUFFER);
+  eglBindTexImage(egl_display, decoding_surface_, EGL_BACK_BUFFER);
 
   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
   glBindTexture(GL_TEXTURE_2D, current_texture);
@@ -958,7 +936,8 @@
   }
   if (!profile_supported) {
     RETURN_AND_NOTIFY_ON_FAILURE(false,
-        "Unsupported h.264, vp8, or vp9 profile", PLATFORM_FAILURE, false);
+                                 "Unsupported h.264, vp8, or vp9 profile",
+                                 PLATFORM_FAILURE, false);
   }
 
   // Not all versions of Windows 7 and later include Media Foundation DLLs.
@@ -969,33 +948,32 @@
   RETURN_ON_FAILURE(dxgi_manager_dll, "MFPlat.dll is required for decoding",
                     false);
 
-  // On Windows 8+ mfplat.dll provides the MFCreateDXGIDeviceManager API.
-  // On Windows 7 mshtmlmedia.dll provides it.
+// On Windows 8+ mfplat.dll provides the MFCreateDXGIDeviceManager API.
+// On Windows 7 mshtmlmedia.dll provides it.
 
-  // TODO(ananta)
-  // The code below works, as in we can create the DX11 device manager for
-  // Windows 7. However the IMFTransform we use for texture conversion and
-  // copy does not exist on Windows 7. Look into an alternate approach
-  // and enable the code below.
+// TODO(ananta)
+// The code below works, as in we can create the DX11 device manager for
+// Windows 7. However the IMFTransform we use for texture conversion and
+// copy does not exist on Windows 7. Look into an alternate approach
+// and enable the code below.
 #if defined(ENABLE_DX11_FOR_WIN7)
   if (base::win::GetVersion() == base::win::VERSION_WIN7) {
     dxgi_manager_dll = ::GetModuleHandle(L"mshtmlmedia.dll");
     RETURN_ON_FAILURE(dxgi_manager_dll,
-        "mshtmlmedia.dll is required for decoding", false);
+                      "mshtmlmedia.dll is required for decoding", false);
   }
 #endif
   // If we don't find the MFCreateDXGIDeviceManager API we fallback to D3D9
   // decoding.
   if (dxgi_manager_dll && !create_dxgi_device_manager_) {
     create_dxgi_device_manager_ = reinterpret_cast<CreateDXGIDeviceManager>(
-            ::GetProcAddress(dxgi_manager_dll, "MFCreateDXGIDeviceManager"));
+        ::GetProcAddress(dxgi_manager_dll, "MFCreateDXGIDeviceManager"));
   }
 
   RETURN_AND_NOTIFY_ON_FAILURE(
       gfx::g_driver_egl.ext.b_EGL_ANGLE_surface_d3d_texture_2d_share_handle,
       "EGL_ANGLE_surface_d3d_texture_2d_share_handle unavailable",
-      PLATFORM_FAILURE,
-      false);
+      PLATFORM_FAILURE, false);
 
   RETURN_AND_NOTIFY_ON_FAILURE(gfx::GLFence::IsSupported(),
                                "GL fences are unsupported", PLATFORM_FAILURE,
@@ -1003,15 +981,18 @@
 
   State state = GetState();
   RETURN_AND_NOTIFY_ON_FAILURE((state == kUninitialized),
-      "Initialize: invalid state: " << state, ILLEGAL_STATE, false);
+                               "Initialize: invalid state: " << state,
+                               ILLEGAL_STATE, false);
 
   media::InitializeMediaFoundation();
 
   RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(config.profile),
-      "Failed to initialize decoder", PLATFORM_FAILURE, false);
+                               "Failed to initialize decoder", PLATFORM_FAILURE,
+                               false);
 
   RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
-      "Failed to get input/output stream info.", PLATFORM_FAILURE, false);
+                               "Failed to get input/output stream info.",
+                               PLATFORM_FAILURE, false);
 
   RETURN_AND_NOTIFY_ON_FAILURE(
       SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
@@ -1043,10 +1024,9 @@
 
   hr = d3d9_->CheckDeviceFormatConversion(
       D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL,
-      static_cast<D3DFORMAT>(MAKEFOURCC('N', 'V', '1', '2')),
-      D3DFMT_X8R8G8B8);
-  RETURN_ON_HR_FAILURE(hr,
-      "D3D9 driver does not support H/W format conversion", false);
+      static_cast<D3DFORMAT>(MAKEFOURCC('N', 'V', '1', '2')), D3DFMT_X8R8G8B8);
+  RETURN_ON_HR_FAILURE(hr, "D3D9 driver does not support H/W format conversion",
+                       false);
 
   base::win::ScopedComPtr<IDirect3DDevice9> angle_device =
       QueryDeviceObjectFromANGLE<IDirect3DDevice9>(EGL_D3D9_DEVICE_ANGLE);
@@ -1055,8 +1035,8 @@
 
   if (using_angle_device_) {
     hr = d3d9_device_ex_.QueryFrom(angle_device.get());
-    RETURN_ON_HR_FAILURE(hr,
-        "QueryInterface for IDirect3DDevice9Ex from angle device failed",
+    RETURN_ON_HR_FAILURE(
+        hr, "QueryInterface for IDirect3DDevice9Ex from angle device failed",
         false);
   } else {
     D3DPRESENT_PARAMETERS present_params = {0};
@@ -1071,15 +1051,11 @@
     present_params.FullScreen_RefreshRateInHz = 0;
     present_params.PresentationInterval = 0;
 
-    hr = d3d9_->CreateDeviceEx(D3DADAPTER_DEFAULT,
-                               D3DDEVTYPE_HAL,
-                               NULL,
-                               D3DCREATE_FPU_PRESERVE |
-                               D3DCREATE_MIXED_VERTEXPROCESSING |
-                               D3DCREATE_MULTITHREADED,
-                               &present_params,
-                               NULL,
-                               d3d9_device_ex_.Receive());
+    hr = d3d9_->CreateDeviceEx(
+        D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, NULL,
+        D3DCREATE_FPU_PRESERVE | D3DCREATE_MIXED_VERTEXPROCESSING |
+            D3DCREATE_MULTITHREADED,
+        &present_params, NULL, d3d9_device_ex_.Receive());
     RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device", false);
   }
 
@@ -1109,14 +1085,9 @@
   // The ordering MUST be preserved. All applications are assumed to support
   // 9.1 unless otherwise stated by the application.
   D3D_FEATURE_LEVEL feature_levels[] = {
-    D3D_FEATURE_LEVEL_11_1,
-    D3D_FEATURE_LEVEL_11_0,
-    D3D_FEATURE_LEVEL_10_1,
-    D3D_FEATURE_LEVEL_10_0,
-    D3D_FEATURE_LEVEL_9_3,
-    D3D_FEATURE_LEVEL_9_2,
-    D3D_FEATURE_LEVEL_9_1
-  };
+      D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_1,
+      D3D_FEATURE_LEVEL_10_0, D3D_FEATURE_LEVEL_9_3,  D3D_FEATURE_LEVEL_9_2,
+      D3D_FEATURE_LEVEL_9_1};
 
   UINT flags = D3D11_CREATE_DEVICE_VIDEO_SUPPORT;
 
@@ -1125,16 +1096,10 @@
 #endif
 
   D3D_FEATURE_LEVEL feature_level_out = D3D_FEATURE_LEVEL_11_0;
-  hr = D3D11CreateDevice(NULL,
-                         D3D_DRIVER_TYPE_HARDWARE,
-                         NULL,
-                         flags,
-                         feature_levels,
-                         arraysize(feature_levels),
-                         D3D11_SDK_VERSION,
-                         d3d11_device_.Receive(),
-                         &feature_level_out,
-                         d3d11_device_context_.Receive());
+  hr = D3D11CreateDevice(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, flags,
+                         feature_levels, arraysize(feature_levels),
+                         D3D11_SDK_VERSION, d3d11_device_.Receive(),
+                         &feature_level_out, d3d11_device_context_.Receive());
   RETURN_ON_HR_FAILURE(hr, "Failed to create DX11 device", false);
 
   // Enable multithreaded mode on the device. This ensures that accesses to
@@ -1152,20 +1117,16 @@
   D3D11_QUERY_DESC query_desc;
   query_desc.Query = D3D11_QUERY_EVENT;
   query_desc.MiscFlags = 0;
-  hr = d3d11_device_->CreateQuery(
-      &query_desc,
-      d3d11_query_.Receive());
+  hr = d3d11_device_->CreateQuery(&query_desc, d3d11_query_.Receive());
   RETURN_ON_HR_FAILURE(hr, "Failed to create DX11 device query", false);
 
   HMODULE video_processor_dll = ::GetModuleHandle(L"msvproc.dll");
   RETURN_ON_FAILURE(video_processor_dll, "Failed to load video processor",
                     false);
 
-  hr = CreateCOMObjectFromDll(
-      video_processor_dll,
-      CLSID_VideoProcessorMFT,
-      __uuidof(IMFTransform),
-      video_format_converter_mft_.ReceiveVoid());
+  hr = CreateCOMObjectFromDll(video_processor_dll, CLSID_VideoProcessorMFT,
+                              __uuidof(IMFTransform),
+                              video_format_converter_mft_.ReceiveVoid());
   if (FAILED(hr)) {
     base::debug::Alias(&hr);
     // TODO(ananta)
@@ -1183,15 +1144,11 @@
 
   hr = converter_attributes->SetUINT32(MF_XVP_PLAYBACK_MODE, TRUE);
   RETURN_ON_HR_FAILURE(
-      hr,
-      "Failed to set MF_XVP_PLAYBACK_MODE attribute on converter",
-      false);
+      hr, "Failed to set MF_XVP_PLAYBACK_MODE attribute on converter", false);
 
   hr = converter_attributes->SetUINT32(MF_LOW_LATENCY, FALSE);
   RETURN_ON_HR_FAILURE(
-      hr,
-      "Failed to set MF_LOW_LATENCY attribute on converter",
-      false);
+      hr, "Failed to set MF_LOW_LATENCY attribute on converter", false);
   return true;
 }
 
@@ -1203,9 +1160,9 @@
   base::SharedMemory shm(bitstream_buffer.handle(), true);
 
   State state = GetState();
-  RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped ||
-                                state == kFlushing),
-           "Invalid state: " << state, ILLEGAL_STATE,);
+  RETURN_AND_NOTIFY_ON_FAILURE(
+      (state == kNormal || state == kStopped || state == kFlushing),
+      "Invalid state: " << state, ILLEGAL_STATE, );
   if (bitstream_buffer.id() < 0) {
     RETURN_AND_NOTIFY_ON_FAILURE(
         false, "Invalid bitstream_buffer, id: " << bitstream_buffer.id(),
@@ -1224,13 +1181,13 @@
   RETURN_AND_NOTIFY_ON_FAILURE(sample.get(), "Failed to create input sample",
                                PLATFORM_FAILURE, );
 
-  RETURN_AND_NOTIFY_ON_HR_FAILURE(sample->SetSampleTime(bitstream_buffer.id()),
-      "Failed to associate input buffer id with sample", PLATFORM_FAILURE,);
+  RETURN_AND_NOTIFY_ON_HR_FAILURE(
+      sample->SetSampleTime(bitstream_buffer.id()),
+      "Failed to associate input buffer id with sample", PLATFORM_FAILURE, );
 
   decoder_thread_task_runner_->PostTask(
-      FROM_HERE,
-      base::Bind(&DXVAVideoDecodeAccelerator::DecodeInternal,
-                 base::Unretained(this), sample));
+      FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::DecodeInternal,
+                            base::Unretained(this), sample));
 }
 
 void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
@@ -1239,32 +1196,35 @@
 
   State state = GetState();
   RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized),
-      "Invalid state: " << state, ILLEGAL_STATE,);
-  RETURN_AND_NOTIFY_ON_FAILURE((kNumPictureBuffers >= buffers.size()),
-      "Failed to provide requested picture buffers. (Got " << buffers.size() <<
-      ", requested " << kNumPictureBuffers << ")", INVALID_ARGUMENT,);
+                               "Invalid state: " << state, ILLEGAL_STATE, );
+  RETURN_AND_NOTIFY_ON_FAILURE(
+      (kNumPictureBuffers >= buffers.size()),
+      "Failed to provide requested picture buffers. (Got "
+          << buffers.size() << ", requested " << kNumPictureBuffers << ")",
+      INVALID_ARGUMENT, );
 
   // Copy the picture buffers provided by the client to the available list,
   // and mark these buffers as available for use.
-  for (size_t buffer_index = 0; buffer_index < buffers.size();
-       ++buffer_index) {
+  for (size_t buffer_index = 0; buffer_index < buffers.size(); ++buffer_index) {
     DCHECK_LE(1u, buffers[buffer_index].texture_ids().size());
     linked_ptr<DXVAPictureBuffer> picture_buffer =
         DXVAPictureBuffer::Create(*this, buffers[buffer_index], egl_config_);
     RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer.get(),
-        "Failed to allocate picture buffer", PLATFORM_FAILURE,);
+                                 "Failed to allocate picture buffer",
+                                 PLATFORM_FAILURE, );
 
-    bool inserted = output_picture_buffers_.insert(std::make_pair(
-        buffers[buffer_index].id(), picture_buffer)).second;
+    bool inserted =
+        output_picture_buffers_
+            .insert(std::make_pair(buffers[buffer_index].id(), picture_buffer))
+            .second;
     DCHECK(inserted);
   }
 
   ProcessPendingSamples();
   if (pending_flush_) {
     decoder_thread_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
-                   base::Unretained(this)));
+        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
+                              base::Unretained(this)));
   }
 }
 
@@ -1273,7 +1233,7 @@
 
   State state = GetState();
   RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized),
-      "Invalid state: " << state, ILLEGAL_STATE,);
+                               "Invalid state: " << state, ILLEGAL_STATE, );
 
   if (output_picture_buffers_.empty() && stale_output_picture_buffers_.empty())
     return;
@@ -1289,7 +1249,8 @@
     if (!stale_output_picture_buffers_.empty()) {
       it = stale_output_picture_buffers_.find(picture_buffer_id);
       RETURN_AND_NOTIFY_ON_FAILURE(it != stale_output_picture_buffers_.end(),
-          "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
+                                   "Invalid picture id: " << picture_buffer_id,
+                                   INVALID_ARGUMENT, );
       main_thread_task_runner_->PostTask(
           FROM_HERE,
           base::Bind(&DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer,
@@ -1353,9 +1314,8 @@
   ProcessPendingSamples();
   if (pending_flush_) {
     decoder_thread_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
-                   base::Unretained(this)));
+        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
+                              base::Unretained(this)));
   }
 }
 
@@ -1366,16 +1326,16 @@
 
   State state = GetState();
   RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped),
-      "Unexpected decoder state: " << state, ILLEGAL_STATE,);
+                               "Unexpected decoder state: " << state,
+                               ILLEGAL_STATE, );
 
   SetState(kFlushing);
 
   pending_flush_ = true;
 
   decoder_thread_task_runner_->PostTask(
-      FROM_HERE,
-      base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
-                  base::Unretained(this)));
+      FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
+                            base::Unretained(this)));
 }
 
 void DXVAVideoDecodeAccelerator::Reset() {
@@ -1385,7 +1345,8 @@
 
   State state = GetState();
   RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped),
-      "Reset: invalid state: " << state, ILLEGAL_STATE,);
+                               "Reset: invalid state: " << state,
+                               ILLEGAL_STATE, );
 
   decoder_thread_.Stop();
 
@@ -1395,11 +1356,10 @@
   // frames and set the corresponding picture buffer as available.
   PendingOutputSamples::iterator index;
   for (index = pending_output_samples_.begin();
-       index != pending_output_samples_.end();
-       ++index) {
+       index != pending_output_samples_.end(); ++index) {
     if (index->picture_buffer_id != -1) {
-      OutputBuffers::iterator it = output_picture_buffers_.find(
-          index->picture_buffer_id);
+      OutputBuffers::iterator it =
+          output_picture_buffers_.find(index->picture_buffer_id);
       if (it != output_picture_buffers_.end()) {
         DXVAPictureBuffer* picture_buffer = it->second.get();
         picture_buffer->ReusePictureBuffer();
@@ -1412,12 +1372,12 @@
   NotifyInputBuffersDropped();
 
   RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
-      "Reset: Failed to send message.", PLATFORM_FAILURE,);
+                               "Reset: Failed to send message.",
+                               PLATFORM_FAILURE, );
 
   main_thread_task_runner_->PostTask(
-      FROM_HERE,
-      base::Bind(&DXVAVideoDecodeAccelerator::NotifyResetDone,
-                 weak_this_factory_.GetWeakPtr()));
+      FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::NotifyResetDone,
+                            weak_this_factory_.GetWeakPtr()));
 
   StartDecoderThread();
   SetState(kNormal);
@@ -1443,7 +1403,7 @@
 media::VideoDecodeAccelerator::SupportedProfiles
 DXVAVideoDecodeAccelerator::GetSupportedProfiles() {
   TRACE_EVENT0("gpu,startup",
-    "DXVAVideoDecodeAccelerator::GetSupportedProfiles");
+               "DXVAVideoDecodeAccelerator::GetSupportedProfiles");
 
   // TODO(henryhsu): Need to ensure the profiles are actually supported.
   SupportedProfiles profiles;
@@ -1479,8 +1439,7 @@
 // static
 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMinResolution(
     media::VideoCodecProfile profile) {
-  TRACE_EVENT0("gpu,startup",
-    "DXVAVideoDecodeAccelerator::GetMinResolution");
+  TRACE_EVENT0("gpu,startup", "DXVAVideoDecodeAccelerator::GetMinResolution");
   std::pair<int, int> min_resolution;
   if (profile >= media::H264PROFILE_BASELINE &&
       profile <= media::H264PROFILE_HIGH) {
@@ -1499,8 +1458,7 @@
 // static
 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxResolution(
     const media::VideoCodecProfile profile) {
-  TRACE_EVENT0("gpu,startup",
-               "DXVAVideoDecodeAccelerator::GetMaxResolution");
+  TRACE_EVENT0("gpu,startup", "DXVAVideoDecodeAccelerator::GetMaxResolution");
   std::pair<int, int> max_resolution;
   if (profile >= media::H264PROFILE_BASELINE &&
       profile <= media::H264PROFILE_HIGH) {
@@ -1539,7 +1497,7 @@
 
   {
     TRACE_EVENT0("gpu,startup",
-      "GetMaxH264Resolution. QueryDeviceObjectFromANGLE");
+                 "GetMaxH264Resolution. QueryDeviceObjectFromANGLE");
 
     device = QueryDeviceObjectFromANGLE<ID3D11Device>(EGL_D3D11_DEVICE_ANGLE);
     if (!device.get())
@@ -1547,7 +1505,7 @@
   }
 
   base::win::ScopedComPtr<ID3D11VideoDevice> video_device;
-  hr = device.QueryInterface(IID_ID3D11VideoDevice,
+  hr = device.QueryInterface(__uuidof(ID3D11VideoDevice),
                              video_device.ReceiveVoid());
   if (FAILED(hr))
     return max_resolution;
@@ -1563,9 +1521,8 @@
     for (UINT profile_idx = 0; profile_idx < profile_count; profile_idx++) {
       GUID profile_id = {};
       hr = video_device->GetVideoDecoderProfile(profile_idx, &profile_id);
-      if (SUCCEEDED(hr) &&
-          (profile_id == DXVA2_ModeH264_E ||
-          profile_id == DXVA2_Intel_ModeH264_E)) {
+      if (SUCCEEDED(hr) && (profile_id == DXVA2_ModeH264_E ||
+                            profile_id == DXVA2_Intel_ModeH264_E)) {
         decoder_guid = profile_id;
         found = true;
         break;
@@ -1584,20 +1541,17 @@
   // TODO(ananta)
   // Look into whether this list needs to be expanded.
   static std::pair<int, int> resolution_array[] = {
-    // Use 1088 to account for 16x16 macroblocks.
-    std::make_pair(1920, 1088),
-    std::make_pair(2560, 1440),
-    std::make_pair(3840, 2160),
-    std::make_pair(4096, 2160),
-    std::make_pair(4096, 2304),
+      // Use 1088 to account for 16x16 macroblocks.
+      std::make_pair(1920, 1088), std::make_pair(2560, 1440),
+      std::make_pair(3840, 2160), std::make_pair(4096, 2160),
+      std::make_pair(4096, 2304),
   };
 
   {
     TRACE_EVENT0("gpu,startup",
                  "GetMaxH264Resolution. Resolution search begin");
 
-    for (size_t res_idx = 0; res_idx < arraysize(resolution_array);
-         res_idx++) {
+    for (size_t res_idx = 0; res_idx < arraysize(resolution_array); res_idx++) {
       D3D11_VIDEO_DECODER_DESC desc = {};
       desc.Guid = decoder_guid;
       desc.SampleWidth = resolution_array[res_idx].first;
@@ -1615,7 +1569,7 @@
 
       base::win::ScopedComPtr<ID3D11VideoDecoder> video_decoder;
       hr = video_device->CreateVideoDecoder(&desc, &config,
-          video_decoder.Receive());
+                                            video_decoder.Receive());
       if (!video_decoder.get())
         return max_resolution;
 
@@ -1659,7 +1613,7 @@
   // the global list defined by the g_AMDUVD3GPUList and g_IntelLegacyGPUList
   // arrays above. If yes then the device is treated as a legacy device.
   if ((adapter_desc.VendorId == kAMDGPUId1) ||
-       adapter_desc.VendorId == kAMDGPUId2) {
+      adapter_desc.VendorId == kAMDGPUId2) {
     {
       TRACE_EVENT0("gpu,startup",
                    "DXVAVideoDecodeAccelerator::IsLegacyGPU. AMD check");
@@ -1671,7 +1625,7 @@
   } else if (adapter_desc.VendorId == kIntelGPU) {
     {
       TRACE_EVENT0("gpu,startup",
-        "DXVAVideoDecodeAccelerator::IsLegacyGPU. Intel check");
+                   "DXVAVideoDecodeAccelerator::IsLegacyGPU. Intel check");
       for (size_t i = 0; i < arraysize(g_IntelLegacyGPUList); i++) {
         if (adapter_desc.DeviceId == g_IntelLegacyGPUList[i])
           return legacy_gpu;
@@ -1703,13 +1657,11 @@
     // fall back to software decoding. See crbug/403440.
     std::unique_ptr<FileVersionInfo> version_info(
         FileVersionInfo::CreateFileVersionInfoForModule(decoder_dll));
-    RETURN_ON_FAILURE(version_info,
-                      "unable to get version of msmpeg2vdec.dll",
+    RETURN_ON_FAILURE(version_info, "unable to get version of msmpeg2vdec.dll",
                       false);
     base::string16 file_version = version_info->file_version();
     RETURN_ON_FAILURE(file_version.find(L"6.1.7140") == base::string16::npos,
-                      "blacklisted version of msmpeg2vdec.dll 6.1.7140",
-                      false);
+                      "blacklisted version of msmpeg2vdec.dll 6.1.7140", false);
     codec_ = media::kCodecH264;
     clsid = __uuidof(CMSH264DecoderMFT);
   } else if (enable_accelerated_vpx_decode_ &&
@@ -1726,7 +1678,7 @@
 
     base::FilePath dll_path;
     RETURN_ON_FAILURE(PathService::Get(program_files_key, &dll_path),
-        "failed to get path for Program Files", false);
+                      "failed to get path for Program Files", false);
 
     dll_path = dll_path.Append(kVPXDecoderDLLPath);
     if (profile == media::VP8PROFILE_ANY) {
@@ -1739,16 +1691,14 @@
       clsid = CLSID_WebmMfVp9Dec;
     }
     decoder_dll = ::LoadLibraryEx(dll_path.value().data(), NULL,
-        LOAD_WITH_ALTERED_SEARCH_PATH);
+                                  LOAD_WITH_ALTERED_SEARCH_PATH);
     RETURN_ON_FAILURE(decoder_dll, "vpx decoder dll is not loaded", false);
   } else {
     RETURN_ON_FAILURE(false, "Unsupported codec.", false);
   }
 
-  HRESULT hr = CreateCOMObjectFromDll(decoder_dll,
-                                      clsid,
-                                      __uuidof(IMFTransform),
-                                      decoder_.ReceiveVoid());
+  HRESULT hr = CreateCOMObjectFromDll(
+      decoder_dll, clsid, __uuidof(IMFTransform), decoder_.ReceiveVoid());
   RETURN_ON_HR_FAILURE(hr, "Failed to create decoder instance", false);
 
   RETURN_ON_FAILURE(CheckDecoderDxvaSupport(),
@@ -1759,21 +1709,18 @@
     CHECK(create_dxgi_device_manager_);
     RETURN_AND_NOTIFY_ON_FAILURE(CreateDX11DevManager(),
                                  "Failed to initialize DX11 device and manager",
-                                 PLATFORM_FAILURE,
-                                 false);
-    device_manager_to_use = reinterpret_cast<ULONG_PTR>(
-        d3d11_device_manager_.get());
+                                 PLATFORM_FAILURE, false);
+    device_manager_to_use =
+        reinterpret_cast<ULONG_PTR>(d3d11_device_manager_.get());
   } else {
     RETURN_AND_NOTIFY_ON_FAILURE(CreateD3DDevManager(),
                                  "Failed to initialize D3D device and manager",
-                                 PLATFORM_FAILURE,
-                                 false);
+                                 PLATFORM_FAILURE, false);
     device_manager_to_use = reinterpret_cast<ULONG_PTR>(device_manager_.get());
   }
 
-  hr = decoder_->ProcessMessage(
-            MFT_MESSAGE_SET_D3D_MANAGER,
-            device_manager_to_use);
+  hr = decoder_->ProcessMessage(MFT_MESSAGE_SET_D3D_MANAGER,
+                                device_manager_to_use);
   if (use_dx11_) {
     RETURN_ON_HR_FAILURE(hr, "Failed to pass DX11 manager to decoder", false);
   } else {
@@ -1782,24 +1729,18 @@
 
   EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
 
-  EGLint config_attribs[] = {
-    EGL_BUFFER_SIZE, 32,
-    EGL_RED_SIZE, 8,
-    EGL_GREEN_SIZE, 8,
-    EGL_BLUE_SIZE, 8,
-    EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
-    EGL_ALPHA_SIZE, 0,
-    EGL_NONE
-  };
+  EGLint config_attribs[] = {EGL_BUFFER_SIZE,  32,
+                             EGL_RED_SIZE,     8,
+                             EGL_GREEN_SIZE,   8,
+                             EGL_BLUE_SIZE,    8,
+                             EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
+                             EGL_ALPHA_SIZE,   0,
+                             EGL_NONE};
 
   EGLint num_configs;
 
-  if (!eglChooseConfig(
-      egl_display,
-      config_attribs,
-      &egl_config_,
-      1,
-      &num_configs))
+  if (!eglChooseConfig(egl_display, config_attribs, &egl_config_, 1,
+                       &num_configs))
     return false;
 
   return SetDecoderMediaTypes();
@@ -1922,8 +1863,8 @@
   // The flags here should be the same and mean the same thing, except when
   // DXVA is enabled, there is an extra 0x100 flag meaning decoder will
   // allocate its own sample.
-  DVLOG(1) << "Flags: "
-          << std::hex << std::showbase << output_stream_info_.dwFlags;
+  DVLOG(1) << "Flags: " << std::hex << std::showbase
+           << output_stream_info_.dwFlags;
   if (codec_ == media::kCodecH264) {
     CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
   }
@@ -1939,15 +1880,14 @@
   State state = GetState();
   RETURN_AND_NOTIFY_ON_FAILURE(
       (state == kNormal || state == kFlushing || state == kStopped),
-          "DoDecode: not in normal/flushing/stopped state", ILLEGAL_STATE,);
+      "DoDecode: not in normal/flushing/stopped state", ILLEGAL_STATE, );
 
   MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
   DWORD status = 0;
 
   HRESULT hr = decoder_->ProcessOutput(0,  // No flags
                                        1,  // # of out streams to pull from
-                                       &output_data_buffer,
-                                       &status);
+                                       &output_data_buffer, &status);
   IMFCollection* events = output_data_buffer.pEvents;
   if (events != NULL) {
     DVLOG(1) << "Got events from ProcessOuput, but discarding";
@@ -1985,7 +1925,8 @@
   inputs_before_decode_ = 0;
 
   RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample),
-      "Failed to process output sample.", PLATFORM_FAILURE,);
+                               "Failed to process output sample.",
+                               PLATFORM_FAILURE, );
 }
 
 bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
@@ -2021,11 +1962,8 @@
 
   // Go ahead and request picture buffers.
   main_thread_task_runner_->PostTask(
-      FROM_HERE,
-      base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers,
-                 weak_this_factory_.GetWeakPtr(),
-                 width,
-                 height));
+      FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers,
+                            weak_this_factory_.GetWeakPtr(), width, height));
 
   pictures_requested_ = true;
   return true;
@@ -2044,8 +1982,7 @@
   OutputBuffers::iterator index;
 
   for (index = output_picture_buffers_.begin();
-       index != output_picture_buffers_.end() &&
-       OutputSamplesPresent();
+       index != output_picture_buffers_.end() && OutputSamplesPresent();
        ++index) {
     if (index->second->available()) {
       PendingSampleInfo* pending_sample = NULL;
@@ -2059,10 +1996,11 @@
 
       int width = 0;
       int height = 0;
-      if (!GetVideoFrameDimensions(pending_sample->output_sample.get(),
-          &width, &height)) {
-        RETURN_AND_NOTIFY_ON_FAILURE(false,
-            "Failed to get D3D surface from output sample", PLATFORM_FAILURE,);
+      if (!GetVideoFrameDimensions(pending_sample->output_sample.get(), &width,
+                                   &height)) {
+        RETURN_AND_NOTIFY_ON_FAILURE(
+            false, "Failed to get D3D surface from output sample",
+            PLATFORM_FAILURE, );
       }
 
       if (width != index->second->size().width() ||
@@ -2074,8 +2012,8 @@
       base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
       HRESULT hr = pending_sample->output_sample->GetBufferByIndex(
           0, output_buffer.Receive());
-      RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
-          "Failed to get buffer from output sample", PLATFORM_FAILURE,);
+      RETURN_AND_NOTIFY_ON_HR_FAILURE(
+          hr, "Failed to get buffer from output sample", PLATFORM_FAILURE, );
 
       base::win::ScopedComPtr<IDirect3DSurface9> surface;
       base::win::ScopedComPtr<ID3D11Texture2D> d3d11_texture;
@@ -2083,8 +2021,9 @@
       if (use_dx11_) {
         base::win::ScopedComPtr<IMFDXGIBuffer> dxgi_buffer;
         hr = dxgi_buffer.QueryFrom(output_buffer.get());
-        RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
-            "Failed to get DXGIBuffer from output sample", PLATFORM_FAILURE,);
+        RETURN_AND_NOTIFY_ON_HR_FAILURE(
+            hr, "Failed to get DXGIBuffer from output sample",
+            PLATFORM_FAILURE, );
         hr = dxgi_buffer->GetResource(
             __uuidof(ID3D11Texture2D),
             reinterpret_cast<void**>(d3d11_texture.Receive()));
@@ -2092,18 +2031,16 @@
         hr = MFGetService(output_buffer.get(), MR_BUFFER_SERVICE,
                           IID_PPV_ARGS(surface.Receive()));
       }
-      RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
-          "Failed to get surface from output sample", PLATFORM_FAILURE,);
+      RETURN_AND_NOTIFY_ON_HR_FAILURE(
+          hr, "Failed to get surface from output sample", PLATFORM_FAILURE, );
 
       pending_sample->picture_buffer_id = index->second->id();
 
       RETURN_AND_NOTIFY_ON_FAILURE(
           index->second->CopyOutputSampleDataToPictureBuffer(
-              this,
-              surface.get(),
-              d3d11_texture.get(),
+              this, surface.get(), d3d11_texture.get(),
               pending_sample->input_buffer_id),
-          "Failed to copy output sample", PLATFORM_FAILURE,);
+          "Failed to copy output sample", PLATFORM_FAILURE, );
 
       index->second->set_available(false);
     }
@@ -2111,13 +2048,11 @@
 }
 
 void DXVAVideoDecodeAccelerator::StopOnError(
-  media::VideoDecodeAccelerator::Error error) {
+    media::VideoDecodeAccelerator::Error error) {
   if (!main_thread_task_runner_->BelongsToCurrentThread()) {
     main_thread_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(&DXVAVideoDecodeAccelerator::StopOnError,
-                   weak_this_factory_.GetWeakPtr(),
-                   error));
+        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::StopOnError,
+                              weak_this_factory_.GetWeakPtr(), error));
     return;
   }
 
@@ -2207,16 +2142,15 @@
   }
 }
 
-void DXVAVideoDecodeAccelerator::NotifyPictureReady(
-    int picture_buffer_id,
-    int input_buffer_id) {
+void DXVAVideoDecodeAccelerator::NotifyPictureReady(int picture_buffer_id,
+                                                    int input_buffer_id) {
   DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
   // This task could execute after the decoder has been torn down.
   if (GetState() != kUninitialized && client_) {
     // TODO(henryhsu): Use correct visible size instead of (0, 0). We can't use
     // coded size here so use (0, 0) intentionally to have the client choose.
-    media::Picture picture(picture_buffer_id, input_buffer_id,
-                           gfx::Rect(0, 0), false);
+    media::Picture picture(picture_buffer_id, input_buffer_id, gfx::Rect(0, 0),
+                           false);
     client_->PictureReady(picture);
   }
 }
@@ -2230,7 +2164,7 @@
        it != pending_input_buffers_.end(); ++it) {
     LONGLONG input_buffer_id = 0;
     RETURN_ON_HR_FAILURE((*it)->GetSampleTime(&input_buffer_id),
-                         "Failed to get buffer id associated with sample",);
+                         "Failed to get buffer id associated with sample", );
     client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
   }
   pending_input_buffers_.clear();
@@ -2240,7 +2174,7 @@
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   State state = GetState();
   RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized),
-      "Invalid state: " << state, ILLEGAL_STATE,);
+                               "Invalid state: " << state, ILLEGAL_STATE, );
 
   if (pending_input_buffers_.empty() || OutputSamplesPresent())
     return;
@@ -2268,11 +2202,10 @@
     decoder_thread_task_runner_->PostTask(
         FROM_HERE,
         base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
-                    base::Unretained(this)));
+                   base::Unretained(this)));
     decoder_thread_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
-                    base::Unretained(this)));
+        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
+                              base::Unretained(this)));
     return;
   }
 
@@ -2281,7 +2214,7 @@
     if (!sent_drain_message_) {
       RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0),
                                    "Failed to send drain message",
-                                   PLATFORM_FAILURE,);
+                                   PLATFORM_FAILURE, );
       sent_drain_message_ = true;
     }
   }
@@ -2296,9 +2229,8 @@
   SetState(kFlushing);
 
   main_thread_task_runner_->PostTask(
-      FROM_HERE,
-      base::Bind(&DXVAVideoDecodeAccelerator::NotifyFlushDone,
-                 weak_this_factory_.GetWeakPtr()));
+      FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::NotifyFlushDone,
+                            weak_this_factory_.GetWeakPtr()));
 
   SetState(kNormal);
 }
@@ -2321,15 +2253,13 @@
 
   HRESULT hr = CheckConfigChanged(sample.get(), &config_changed);
   RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to check video stream config",
-      PLATFORM_FAILURE,);
+                                  PLATFORM_FAILURE, );
 
   if (config_changed) {
     pending_input_buffers_.push_back(sample);
     main_thread_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(&DXVAVideoDecodeAccelerator::ConfigChanged,
-                   weak_this_factory_.GetWeakPtr(),
-                   config_));
+        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::ConfigChanged,
+                              weak_this_factory_.GetWeakPtr(), config_));
     return;
   }
 
@@ -2355,10 +2285,10 @@
     // process any more input until that frame is copied to the target surface.
     if (!OutputSamplesPresent()) {
       State state = GetState();
-      RETURN_AND_NOTIFY_ON_FAILURE((state == kStopped || state == kNormal ||
-                                    state == kFlushing),
+      RETURN_AND_NOTIFY_ON_FAILURE(
+          (state == kStopped || state == kNormal || state == kFlushing),
           "Failed to process output. Unexpected decoder state: " << state,
-          PLATFORM_FAILURE,);
+          PLATFORM_FAILURE, );
       hr = decoder_->ProcessInput(0, sample.get(), 0);
     }
     // If we continue to get the MF_E_NOTACCEPTING error we do the following:-
@@ -2375,24 +2305,25 @@
       decoder_thread_task_runner_->PostTask(
           FROM_HERE,
           base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
-                      base::Unretained(this)));
+                     base::Unretained(this)));
       return;
     }
   }
   RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample",
-      PLATFORM_FAILURE,);
+                                  PLATFORM_FAILURE, );
 
   DoDecode();
 
   State state = GetState();
-  RETURN_AND_NOTIFY_ON_FAILURE((state == kStopped || state == kNormal ||
-                                state == kFlushing),
+  RETURN_AND_NOTIFY_ON_FAILURE(
+      (state == kStopped || state == kNormal || state == kFlushing),
       "Failed to process output. Unexpected decoder state: " << state,
-      ILLEGAL_STATE,);
+      ILLEGAL_STATE, );
 
   LONGLONG input_buffer_id = 0;
-  RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id),
-                       "Failed to get input buffer id associated with sample",);
+  RETURN_ON_HR_FAILURE(
+      sample->GetSampleTime(&input_buffer_id),
+      "Failed to get input buffer id associated with sample", );
   // The Microsoft Media foundation decoder internally buffers up to 30 frames
   // before returning a decoded frame. We need to inform the client that this
   // input buffer is processed as it may stop sending us further input.
@@ -2404,10 +2335,8 @@
   // http://code.google.com/p/chromium/issues/detail?id=108121
   // http://code.google.com/p/chromium/issues/detail?id=150925
   main_thread_task_runner_->PostTask(
-      FROM_HERE,
-      base::Bind(&DXVAVideoDecodeAccelerator::NotifyInputBufferRead,
-                 weak_this_factory_.GetWeakPtr(),
-                 input_buffer_id));
+      FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::NotifyInputBufferRead,
+                            weak_this_factory_.GetWeakPtr(), input_buffer_id));
 }
 
 void DXVAVideoDecodeAccelerator::HandleResolutionChanged(int width,
@@ -2415,16 +2344,12 @@
   dx11_video_format_converter_media_type_needs_init_ = true;
 
   main_thread_task_runner_->PostTask(
-      FROM_HERE,
-      base::Bind(&DXVAVideoDecodeAccelerator::DismissStaleBuffers,
-                 weak_this_factory_.GetWeakPtr(), false));
+      FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::DismissStaleBuffers,
+                            weak_this_factory_.GetWeakPtr(), false));
 
   main_thread_task_runner_->PostTask(
-      FROM_HERE,
-      base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers,
-                 weak_this_factory_.GetWeakPtr(),
-                 width,
-                 height));
+      FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers,
+                            weak_this_factory_.GetWeakPtr(), width, height));
 }
 
 void DXVAVideoDecodeAccelerator::DismissStaleBuffers(bool force) {
@@ -2435,8 +2360,7 @@
   OutputBuffers::iterator index;
 
   for (index = output_picture_buffers_.begin();
-       index != output_picture_buffers_.end();
-       ++index) {
+       index != output_picture_buffers_.end(); ++index) {
     if (force || index->second->available()) {
       DVLOG(1) << "Dismissing picture id: " << index->second->id();
       client_->DismissPictureBuffer(index->second->id());
@@ -2456,16 +2380,15 @@
                                "Failed to make context current",
                                PLATFORM_FAILURE, );
 
-  OutputBuffers::iterator it = stale_output_picture_buffers_.find(
-      picture_buffer_id);
+  OutputBuffers::iterator it =
+      stale_output_picture_buffers_.find(picture_buffer_id);
   DCHECK(it != stale_output_picture_buffers_.end());
   DVLOG(1) << "Dismissing picture id: " << it->second->id();
   client_->DismissPictureBuffer(it->second->id());
   stale_output_picture_buffers_.erase(it);
 }
 
-DXVAVideoDecodeAccelerator::State
-DXVAVideoDecodeAccelerator::GetState() {
+DXVAVideoDecodeAccelerator::State DXVAVideoDecodeAccelerator::GetState() {
   static_assert(sizeof(State) == sizeof(long), "mismatched type sizes");
   State state = static_cast<State>(
       InterlockedAdd(reinterpret_cast<volatile long*>(&state_), 0));
@@ -2475,16 +2398,13 @@
 void DXVAVideoDecodeAccelerator::SetState(State new_state) {
   if (!main_thread_task_runner_->BelongsToCurrentThread()) {
     main_thread_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(&DXVAVideoDecodeAccelerator::SetState,
-                   weak_this_factory_.GetWeakPtr(),
-                   new_state));
+        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::SetState,
+                              weak_this_factory_.GetWeakPtr(), new_state));
     return;
   }
 
   static_assert(sizeof(State) == sizeof(long), "mismatched type sizes");
-  ::InterlockedExchange(reinterpret_cast<volatile long*>(&state_),
-                        new_state);
+  ::InterlockedExchange(reinterpret_cast<volatile long*>(&state_), new_state);
   DCHECK_EQ(state_, new_state);
 }
 
@@ -2505,24 +2425,20 @@
                                              int input_buffer_id) {
   if (!decoder_thread_task_runner_->BelongsToCurrentThread()) {
     decoder_thread_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(&DXVAVideoDecodeAccelerator::CopySurface,
-                   base::Unretained(this),
-                   src_surface,
-                   dest_surface,
-                   picture_buffer_id,
-                   input_buffer_id));
+        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopySurface,
+                              base::Unretained(this), src_surface, dest_surface,
+                              picture_buffer_id, input_buffer_id));
     return;
   }
 
   HRESULT hr = d3d9_device_ex_->StretchRect(src_surface, NULL, dest_surface,
                                             NULL, D3DTEXF_NONE);
-  RETURN_ON_HR_FAILURE(hr, "Colorspace conversion via StretchRect failed",);
+  RETURN_ON_HR_FAILURE(hr, "Colorspace conversion via StretchRect failed", );
 
   // Ideally, this should be done immediately before the draw call that uses
   // the texture. Flush it once here though.
   hr = query_->Issue(D3DISSUE_END);
-  RETURN_ON_HR_FAILURE(hr, "Failed to issue END",);
+  RETURN_ON_HR_FAILURE(hr, "Failed to issue END", );
 
   // If we are sharing the ANGLE device we don't need to wait for the Flush to
   // complete.
@@ -2530,21 +2446,17 @@
     main_thread_task_runner_->PostTask(
         FROM_HERE,
         base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
-                   weak_this_factory_.GetWeakPtr(),
-                   src_surface,
-                   dest_surface,
-                   picture_buffer_id,
-                   input_buffer_id));
+                   weak_this_factory_.GetWeakPtr(), src_surface, dest_surface,
+                   picture_buffer_id, input_buffer_id));
     return;
   }
 
   // Flush the decoder device to ensure that the decoded frame is copied to the
   // target surface.
   decoder_thread_task_runner_->PostDelayedTask(
-      FROM_HERE,
-      base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
-                 base::Unretained(this), 0, src_surface, dest_surface,
-                 picture_buffer_id, input_buffer_id),
+      FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
+                            base::Unretained(this), 0, src_surface,
+                            dest_surface, picture_buffer_id, input_buffer_id),
       base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
 }
 
@@ -2590,9 +2502,8 @@
 
   if (pending_flush_) {
     decoder_thread_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
-                   base::Unretained(this)));
+        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
+                              base::Unretained(this)));
     return;
   }
   decoder_thread_task_runner_->PostTask(
@@ -2633,7 +2544,7 @@
                                                      source_desc.Height)) {
       RETURN_AND_NOTIFY_ON_FAILURE(
           false, "Failed to initialize media types for convesion.",
-          PLATFORM_FAILURE,);
+          PLATFORM_FAILURE, );
     }
 
     // The input to the video processor is the output sample.
@@ -2675,22 +2586,21 @@
   base::win::ScopedComPtr<IMFSample> output_sample;
   hr = MFCreateSample(output_sample.Receive());
   if (FAILED(hr)) {
-    RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
-        "Failed to create output sample.", PLATFORM_FAILURE,);
+    RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to create output sample.",
+                                    PLATFORM_FAILURE, );
   }
 
   base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
-  hr = MFCreateDXGISurfaceBuffer(
-      __uuidof(ID3D11Texture2D), dest_texture, 0, FALSE,
-      output_buffer.Receive());
+  hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), dest_texture, 0,
+                                 FALSE, output_buffer.Receive());
   if (FAILED(hr)) {
     base::debug::Alias(&hr);
     // TODO(ananta)
     // Remove this CHECK when the change to use DX11 for H/W decoding
     // stablizes.
     CHECK(false);
-    RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
-        "Failed to create output sample.", PLATFORM_FAILURE,);
+    RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to create output sample.",
+                                    PLATFORM_FAILURE, );
   }
 
   output_sample->AddBuffer(output_buffer.get());
@@ -2698,18 +2608,17 @@
   hr = video_format_converter_mft_->ProcessInput(0, video_frame, 0);
   if (FAILED(hr)) {
     DCHECK(false);
-    RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
-        "Failed to convert output sample format.", PLATFORM_FAILURE,);
+    RETURN_AND_NOTIFY_ON_HR_FAILURE(
+        hr, "Failed to convert output sample format.", PLATFORM_FAILURE, );
   }
 
   DWORD status = 0;
   MFT_OUTPUT_DATA_BUFFER format_converter_output = {};
   format_converter_output.pSample = output_sample.get();
   hr = video_format_converter_mft_->ProcessOutput(
-        0,  // No flags
-        1,  // # of out streams to pull from
-        &format_converter_output,
-        &status);
+      0,  // No flags
+      1,  // # of out streams to pull from
+      &format_converter_output, &status);
 
   if (FAILED(hr)) {
     base::debug::Alias(&hr);
@@ -2717,8 +2626,8 @@
     // Remove this CHECK when the change to use DX11 for H/W decoding
     // stablizes.
     CHECK(false);
-    RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
-        "Failed to convert output sample format.", PLATFORM_FAILURE,);
+    RETURN_AND_NOTIFY_ON_HR_FAILURE(
+        hr, "Failed to convert output sample format.", PLATFORM_FAILURE, );
   }
 
   if (dest_keyed_mutex) {
@@ -2744,12 +2653,11 @@
   }
 }
 
-void DXVAVideoDecodeAccelerator::FlushDecoder(
-    int iterations,
-    IDirect3DSurface9* src_surface,
-    IDirect3DSurface9* dest_surface,
-    int picture_buffer_id,
-    int input_buffer_id) {
+void DXVAVideoDecodeAccelerator::FlushDecoder(int iterations,
+                                              IDirect3DSurface9* src_surface,
+                                              IDirect3DSurface9* dest_surface,
+                                              int picture_buffer_id,
+                                              int input_buffer_id) {
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   // The DXVA decoder has its own device which it uses for decoding. ANGLE
@@ -2784,26 +2692,22 @@
 
   if ((hr == S_FALSE) && (++iterations < kMaxIterationsForD3DFlush)) {
     decoder_thread_task_runner_->PostDelayedTask(
-        FROM_HERE,
-        base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
-                   base::Unretained(this), iterations, src_surface,
-                   dest_surface, picture_buffer_id, input_buffer_id),
+        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
+                              base::Unretained(this), iterations, src_surface,
+                              dest_surface, picture_buffer_id, input_buffer_id),
         base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
     return;
   }
 
   main_thread_task_runner_->PostTask(
-      FROM_HERE,
-      base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
-                 weak_this_factory_.GetWeakPtr(),
-                 src_surface,
-                 dest_surface,
-                 picture_buffer_id,
-                 input_buffer_id));
+      FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
+                            weak_this_factory_.GetWeakPtr(), src_surface,
+                            dest_surface, picture_buffer_id, input_buffer_id));
 }
 
 bool DXVAVideoDecodeAccelerator::InitializeDX11VideoFormatConverterMediaType(
-    int width, int height) {
+    int width,
+    int height) {
   if (!dx11_video_format_converter_media_type_needs_init_)
     return true;
 
@@ -2811,8 +2715,7 @@
 
   HRESULT hr = video_format_converter_mft_->ProcessMessage(
       MFT_MESSAGE_SET_D3D_MANAGER,
-      reinterpret_cast<ULONG_PTR>(
-          d3d11_device_manager_.get()));
+      reinterpret_cast<ULONG_PTR>(d3d11_device_manager_.get()));
 
   if (FAILED(hr)) {
     base::debug::Alias(&hr);
@@ -2822,27 +2725,28 @@
     CHECK(false);
   }
   RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
-      "Failed to initialize video format converter", PLATFORM_FAILURE, false);
+                                  "Failed to initialize video format converter",
+                                  PLATFORM_FAILURE, false);
 
-  video_format_converter_mft_->ProcessMessage(
-      MFT_MESSAGE_NOTIFY_END_STREAMING, 0);
+  video_format_converter_mft_->ProcessMessage(MFT_MESSAGE_NOTIFY_END_STREAMING,
+                                              0);
 
   base::win::ScopedComPtr<IMFMediaType> media_type;
   hr = MFCreateMediaType(media_type.Receive());
   RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "MFCreateMediaType failed",
-      PLATFORM_FAILURE, false);
+                                  PLATFORM_FAILURE, false);
 
   hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
   RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set major input type",
-      PLATFORM_FAILURE, false);
+                                  PLATFORM_FAILURE, false);
 
   hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
   RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set input sub type",
-      PLATFORM_FAILURE, false);
+                                  PLATFORM_FAILURE, false);
 
   hr = MFSetAttributeSize(media_type.get(), MF_MT_FRAME_SIZE, width, height);
   RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set media type attributes",
-      PLATFORM_FAILURE, false);
+                                  PLATFORM_FAILURE, false);
 
   hr = video_format_converter_mft_->SetInputType(0, media_type.get(), 0);
   if (FAILED(hr)) {
@@ -2853,22 +2757,16 @@
     CHECK(false);
   }
   RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set converter input type",
-      PLATFORM_FAILURE, false);
+                                  PLATFORM_FAILURE, false);
 
   // It appears that we fail to set MFVideoFormat_ARGB32 as the output media
   // type in certain configurations. Try to fallback to MFVideoFormat_RGB32
   // in such cases. If both fail, then bail.
-  bool media_type_set =
-      SetTransformOutputType(video_format_converter_mft_.get(),
-                             MFVideoFormat_ARGB32,
-                             width,
-                             height);
+  bool media_type_set = SetTransformOutputType(
+      video_format_converter_mft_.get(), MFVideoFormat_ARGB32, width, height);
   if (!media_type_set) {
-    media_type_set =
-        SetTransformOutputType(video_format_converter_mft_.get(),
-                               MFVideoFormat_RGB32,
-                               width,
-                               height);
+    media_type_set = SetTransformOutputType(video_format_converter_mft_.get(),
+                                            MFVideoFormat_RGB32, width, height);
   }
 
   if (!media_type_set) {
@@ -2882,10 +2780,9 @@
   return true;
 }
 
-bool DXVAVideoDecodeAccelerator::GetVideoFrameDimensions(
-    IMFSample* sample,
-    int* width,
-    int* height) {
+bool DXVAVideoDecodeAccelerator::GetVideoFrameDimensions(IMFSample* sample,
+                                                         int* width,
+                                                         int* height) {
   base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
   HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
   RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from output sample", false);
@@ -2920,17 +2817,15 @@
   return true;
 }
 
-bool DXVAVideoDecodeAccelerator::SetTransformOutputType(
-    IMFTransform* transform,
-    const GUID& output_type,
-    int width,
-    int height) {
+bool DXVAVideoDecodeAccelerator::SetTransformOutputType(IMFTransform* transform,
+                                                        const GUID& output_type,
+                                                        int width,
+                                                        int height) {
   HRESULT hr = E_FAIL;
   base::win::ScopedComPtr<IMFMediaType> media_type;
 
   for (uint32_t i = 0;
-       SUCCEEDED(transform->GetOutputAvailableType(
-           0, i, media_type.Receive()));
+       SUCCEEDED(transform->GetOutputAvailableType(0, i, media_type.Receive()));
        ++i) {
     GUID out_subtype = {0};
     hr = media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
@@ -2951,8 +2846,8 @@
   return false;
 }
 
-HRESULT DXVAVideoDecodeAccelerator::CheckConfigChanged(
-    IMFSample* sample, bool* config_changed) {
+HRESULT DXVAVideoDecodeAccelerator::CheckConfigChanged(IMFSample* sample,
+                                                       bool* config_changed) {
   if (codec_ != media::kCodecH264)
     return S_FALSE;
 
@@ -2963,17 +2858,15 @@
   MediaBufferScopedPointer scoped_media_buffer(buffer.get());
 
   if (!config_change_detector_->DetectConfig(
-          scoped_media_buffer.get(),
-          scoped_media_buffer.current_length())) {
+          scoped_media_buffer.get(), scoped_media_buffer.current_length())) {
     RETURN_ON_HR_FAILURE(E_FAIL, "Failed to detect H.264 stream config",
-        E_FAIL);
+                         E_FAIL);
   }
   *config_changed = config_change_detector_->config_changed();
   return S_OK;
 }
 
-void DXVAVideoDecodeAccelerator::ConfigChanged(
-    const Config& config) {
+void DXVAVideoDecodeAccelerator::ConfigChanged(const Config& config) {
   DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
 
   SetState(kConfigChange);
@@ -2986,4 +2879,4 @@
                  base::Unretained(this)));
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/dxva_video_decode_accelerator_win.h b/media/gpu/dxva_video_decode_accelerator_win.h
similarity index 93%
rename from content/common/gpu/media/dxva_video_decode_accelerator_win.h
rename to media/gpu/dxva_video_decode_accelerator_win.h
index 10ad585b..3f67202 100644
--- a/content/common/gpu/media/dxva_video_decode_accelerator_win.h
+++ b/media/gpu/dxva_video_decode_accelerator_win.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_DXVA_VIDEO_DECODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_DXVA_VIDEO_DECODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_DXVA_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_DXVA_VIDEO_DECODE_ACCELERATOR_H_
 
 #include <d3d11.h>
 #include <d3d9.h>
@@ -13,7 +13,7 @@
 // Work around bug in this header by disabling the relevant warning for it.
 // https://connect.microsoft.com/VisualStudio/feedback/details/911260/dxva2api-h-in-win8-sdk-triggers-c4201-with-w4
 #pragma warning(push)
-#pragma warning(disable:4201)
+#pragma warning(disable : 4201)
 #include <dxva2api.h>
 #pragma warning(pop)
 #include <mfidl.h>
@@ -31,9 +31,9 @@
 #include "base/threading/non_thread_safe.h"
 #include "base/threading/thread.h"
 #include "base/win/scoped_comptr.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
 #include "media/filters/h264_parser.h"
+#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
+#include "media/gpu/media_gpu_export.h"
 #include "media/video/video_decode_accelerator.h"
 
 interface IMFSample;
@@ -43,11 +43,11 @@
 class GLContext;
 }
 
-typedef HRESULT (WINAPI* CreateDXGIDeviceManager)(
+typedef HRESULT(WINAPI* CreateDXGIDeviceManager)(
     UINT* reset_token,
     IMFDXGIDeviceManager** device_manager);
 
-namespace content {
+namespace media {
 
 // Provides functionality to detect H.264 stream configuration changes.
 // TODO(ananta)
@@ -61,9 +61,7 @@
   // Returns false on failure.
   bool DetectConfig(const uint8_t* stream, unsigned int size);
 
-  bool config_changed() const {
-    return config_changed_;
-  }
+  bool config_changed() const { return config_changed_; }
 
  private:
   // These fields are used to track the SPS/PPS in the H.264 bitstream and
@@ -85,21 +83,20 @@
   DISALLOW_COPY_AND_ASSIGN(H264ConfigChangeDetector);
 };
 
-
 // Class to provide a DXVA 2.0 based accelerator using the Microsoft Media
 // foundation APIs via the VideoDecodeAccelerator interface.
 // This class lives on a single thread and DCHECKs that it is never accessed
 // from any other.
-class CONTENT_EXPORT DXVAVideoDecodeAccelerator
+class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
     : public media::VideoDecodeAccelerator {
  public:
   enum State {
-    kUninitialized,               // un-initialized.
-    kNormal,                      // normal playing state.
-    kResetting,                   // upon received Reset(), before ResetDone()
-    kStopped,                     // upon output EOS received.
-    kFlushing,                    // upon flush request received.
-    kConfigChange,                // stream configuration change detected.
+    kUninitialized,  // un-initialized.
+    kNormal,         // normal playing state.
+    kResetting,      // upon received Reset(), before ResetDone()
+    kStopped,        // upon output EOS received.
+    kFlushing,       // upon flush request received.
+    kConfigChange,   // stream configuration change detected.
   };
 
   // Does not take ownership of |client| which must outlive |*this|.
@@ -125,7 +122,7 @@
   GLenum GetSurfaceInternalFormat() const override;
 
   static media::VideoDecodeAccelerator::SupportedProfiles
-      GetSupportedProfiles();
+  GetSupportedProfiles();
 
   // Preload dlls required for decoding.
   static void PreSandboxInitialization();
@@ -136,11 +133,11 @@
 
   // Returns the minimum resolution for the |profile| passed in.
   static std::pair<int, int> GetMinResolution(
-    const media::VideoCodecProfile profile);
+      const media::VideoCodecProfile profile);
 
   // Returns the maximum resolution for the |profile| passed in.
   static std::pair<int, int> GetMaxResolution(
-    const media::VideoCodecProfile profile);
+      const media::VideoCodecProfile profile);
 
   // Returns the maximum resolution for H264 video.
   static std::pair<int, int> GetMaxH264Resolution();
@@ -219,8 +216,7 @@
   void RequestPictureBuffers(int width, int height);
 
   // Notifies the client about the availability of a picture.
-  void NotifyPictureReady(int picture_buffer_id,
-                          int input_buffer_id);
+  void NotifyPictureReady(int picture_buffer_id, int input_buffer_id);
 
   // Sends pending input buffer processed acks to the client if we don't have
   // output samples waiting to be processed.
@@ -346,7 +342,7 @@
   base::win::ScopedComPtr<IDirect3DDeviceManager9> device_manager_;
   base::win::ScopedComPtr<IDirect3DQuery9> query_;
 
-  base::win::ScopedComPtr<ID3D11Device > d3d11_device_;
+  base::win::ScopedComPtr<ID3D11Device> d3d11_device_;
   base::win::ScopedComPtr<IMFDXGIDeviceManager> d3d11_device_manager_;
   base::win::ScopedComPtr<ID3D10Multithread> multi_threaded_;
   base::win::ScopedComPtr<ID3D11DeviceContext> d3d11_device_context_;
@@ -487,6 +483,6 @@
   DISALLOW_COPY_AND_ASSIGN(DXVAVideoDecodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_DXVA_VIDEO_DECODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_DXVA_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/fake_video_decode_accelerator.cc b/media/gpu/fake_video_decode_accelerator.cc
similarity index 87%
rename from content/common/gpu/media/fake_video_decode_accelerator.cc
rename to media/gpu/fake_video_decode_accelerator.cc
index cb59425..97b54906 100644
--- a/content/common/gpu/media/fake_video_decode_accelerator.cc
+++ b/media/gpu/fake_video_decode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/fake_video_decode_accelerator.h"
+#include "media/gpu/fake_video_decode_accelerator.h"
 
 #include <stddef.h>
 #include <string.h>
@@ -20,15 +20,15 @@
 #include "ui/gl/gl_surface_egl.h"
 #include "ui/gl/gl_surface_glx.h"
 
-namespace content {
+namespace media {
 
 static const uint32_t kDefaultTextureTarget = GL_TEXTURE_2D;
 // Must be at least 2 since the rendering helper will switch between textures
 // and if there is only one, it will wait for the next one that will never come.
 // Must also be an even number as otherwise there won't be the same amount of
 // white and black frames.
-static const unsigned int kNumBuffers = media::limits::kMaxVideoFrames +
-    (media::limits::kMaxVideoFrames & 1u);
+static const unsigned int kNumBuffers =
+    media::limits::kMaxVideoFrames + (media::limits::kMaxVideoFrames & 1u);
 
 FakeVideoDecodeAccelerator::FakeVideoDecodeAccelerator(
     const gfx::Size& size,
@@ -40,8 +40,7 @@
       flushing_(false),
       weak_this_factory_(this) {}
 
-FakeVideoDecodeAccelerator::~FakeVideoDecodeAccelerator() {
-}
+FakeVideoDecodeAccelerator::~FakeVideoDecodeAccelerator() {}
 
 bool FakeVideoDecodeAccelerator::Initialize(const Config& config,
                                             Client* client) {
@@ -86,20 +85,18 @@
 void FakeVideoDecodeAccelerator::AssignPictureBuffers(
     const std::vector<media::PictureBuffer>& buffers) {
   DCHECK(buffers.size() == kNumBuffers);
-  DCHECK(!(buffers.size()%2));
+  DCHECK(!(buffers.size() % 2));
 
   // Save buffers and mark all buffers as ready for use.
   std::unique_ptr<uint8_t[]> white_data(
       new uint8_t[frame_buffer_size_.width() * frame_buffer_size_.height() *
                   4]);
-  memset(white_data.get(),
-         UINT8_MAX,
+  memset(white_data.get(), UINT8_MAX,
          frame_buffer_size_.width() * frame_buffer_size_.height() * 4);
   std::unique_ptr<uint8_t[]> black_data(
       new uint8_t[frame_buffer_size_.width() * frame_buffer_size_.height() *
                   4]);
-  memset(black_data.get(),
-         0,
+  memset(black_data.get(), 0,
          frame_buffer_size_.width() * frame_buffer_size_.height() * 4);
   if (!make_context_current_cb_.Run()) {
     LOG(ERROR) << "ReusePictureBuffer(): could not make context current";
@@ -110,14 +107,8 @@
     glBindTexture(GL_TEXTURE_2D, buffers[index].texture_ids()[0]);
     // Every other frame white and the rest black.
     uint8_t* data = index % 2 ? white_data.get() : black_data.get();
-    glTexImage2D(GL_TEXTURE_2D,
-                 0,
-                 GL_RGBA,
-                 frame_buffer_size_.width(),
-                 frame_buffer_size_.height(),
-                 0,
-                 GL_RGBA,
-                 GL_UNSIGNED_BYTE,
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, frame_buffer_size_.width(),
+                 frame_buffer_size_.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE,
                  data);
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
@@ -178,11 +169,8 @@
     int buffer_id = free_output_buffers_.front();
     free_output_buffers_.pop();
 
-    const media::Picture picture =
-        media::Picture(buffer_id,
-                       bitstream_id,
-                       gfx::Rect(frame_buffer_size_),
-                       false);
+    const media::Picture picture = media::Picture(
+        buffer_id, bitstream_id, gfx::Rect(frame_buffer_size_), false);
     client_->PictureReady(picture);
     // Bitstream no longer needed.
     client_->NotifyEndOfBitstreamBuffer(bitstream_id);
@@ -193,4 +181,4 @@
   }
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/fake_video_decode_accelerator.h b/media/gpu/fake_video_decode_accelerator.h
similarity index 83%
rename from content/common/gpu/media/fake_video_decode_accelerator.h
rename to media/gpu/fake_video_decode_accelerator.h
index 10d4782..b8940527 100644
--- a/content/common/gpu/media/fake_video_decode_accelerator.h
+++ b/media/gpu/fake_video_decode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_FAKE_VIDEO_DECODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_FAKE_VIDEO_DECODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_FAKE_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_FAKE_VIDEO_DECODE_ACCELERATOR_H_
 
 #include <stdint.h>
 
@@ -12,15 +12,15 @@
 
 #include "base/macros.h"
 #include "base/memory/weak_ptr.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
+#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
+#include "media/gpu/media_gpu_export.h"
 #include "media/video/video_decode_accelerator.h"
 #include "ui/gfx/geometry/size_f.h"
 #include "ui/gl/gl_context.h"
 
-namespace content {
+namespace media {
 
-class CONTENT_EXPORT FakeVideoDecodeAccelerator
+class MEDIA_GPU_EXPORT FakeVideoDecodeAccelerator
     : public media::VideoDecodeAccelerator {
  public:
   FakeVideoDecodeAccelerator(
@@ -70,6 +70,6 @@
   DISALLOW_COPY_AND_ASSIGN(FakeVideoDecodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_FAKE_VIDEO_DECODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_FAKE_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/generic_v4l2_device.cc b/media/gpu/generic_v4l2_device.cc
similarity index 90%
rename from content/common/gpu/media/generic_v4l2_device.cc
rename to media/gpu/generic_v4l2_device.cc
index 5438f15..57fe87a 100644
--- a/content/common/gpu/media/generic_v4l2_device.cc
+++ b/media/gpu/generic_v4l2_device.cc
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 //
 
-#include "content/common/gpu/media/generic_v4l2_device.h"
+#include "media/gpu/generic_v4l2_device.h"
 
 #include <errno.h>
 #include <fcntl.h>
@@ -22,23 +22,24 @@
 #include "base/posix/eintr_wrapper.h"
 #include "base/trace_event/trace_event.h"
 #include "build/build_config.h"
+#include "media/gpu/generic_v4l2_device.h"
 #include "ui/gl/egl_util.h"
 #include "ui/gl/gl_bindings.h"
 
 #if defined(USE_LIBV4L2)
 // Auto-generated for dlopen libv4l2 libraries
-#include "content/common/gpu/media/v4l2_stubs.h"
+#include "media/gpu/v4l2_stubs.h"
 #include "third_party/v4l-utils/lib/include/libv4l2.h"
 
-using content_common_gpu_media::kModuleV4l2;
-using content_common_gpu_media::InitializeStubs;
-using content_common_gpu_media::StubPathMap;
+using media_gpu::kModuleV4l2;
+using media_gpu::InitializeStubs;
+using media_gpu::StubPathMap;
 
 static const base::FilePath::CharType kV4l2Lib[] =
     FILE_PATH_LITERAL("/usr/lib/libv4l2.so");
 #endif
 
-namespace content {
+namespace media {
 
 namespace {
 const char kDecoderDevice[] = "/dev/video-dec";
@@ -48,9 +49,7 @@
 }
 
 GenericV4L2Device::GenericV4L2Device(Type type)
-    : V4L2Device(type),
-      use_libv4l2_(false) {
-}
+    : V4L2Device(type), use_libv4l2_(false) {}
 
 GenericV4L2Device::~GenericV4L2Device() {
 #if defined(USE_LIBV4L2)
@@ -93,10 +92,10 @@
 }
 
 void* GenericV4L2Device::Mmap(void* addr,
-                             unsigned int len,
-                             int prot,
-                             int flags,
-                             unsigned int offset) {
+                              unsigned int len,
+                              int prot,
+                              int flags,
+                              unsigned int offset) {
   return mmap(addr, len, prot, flags, device_fd_.get(), offset);
 }
 
@@ -213,10 +212,10 @@
   };
 
   return std::find(
-      kEGLImageDrmFmtsSupported,
-      kEGLImageDrmFmtsSupported + arraysize(kEGLImageDrmFmtsSupported),
-      V4L2PixFmtToDrmFormat(v4l2_pixfmt)) !=
-      kEGLImageDrmFmtsSupported + arraysize(kEGLImageDrmFmtsSupported);
+             kEGLImageDrmFmtsSupported,
+             kEGLImageDrmFmtsSupported + arraysize(kEGLImageDrmFmtsSupported),
+             V4L2PixFmtToDrmFormat(v4l2_pixfmt)) !=
+         kEGLImageDrmFmtsSupported + arraysize(kEGLImageDrmFmtsSupported);
 }
 
 EGLImageKHR GenericV4L2Device::CreateEGLImage(
@@ -294,11 +293,13 @@
 }
 
 EGLBoolean GenericV4L2Device::DestroyEGLImage(EGLDisplay egl_display,
-                                             EGLImageKHR egl_image) {
+                                              EGLImageKHR egl_image) {
   return eglDestroyImageKHR(egl_display, egl_image);
 }
 
-GLenum GenericV4L2Device::GetTextureTarget() { return GL_TEXTURE_EXTERNAL_OES; }
+GLenum GenericV4L2Device::GetTextureTarget() {
+  return GL_TEXTURE_EXTERNAL_OES;
+}
 
 uint32_t GenericV4L2Device::PreferredInputFormat() {
   // TODO(posciak): We should support "dontcare" returns here once we
@@ -319,4 +320,4 @@
 #endif
 }
 
-}  //  namespace content
+}  //  namespace media
diff --git a/content/common/gpu/media/generic_v4l2_device.h b/media/gpu/generic_v4l2_device.h
similarity index 88%
rename from content/common/gpu/media/generic_v4l2_device.h
rename to media/gpu/generic_v4l2_device.h
index a5f787b..180e2e3 100644
--- a/content/common/gpu/media/generic_v4l2_device.h
+++ b/media/gpu/generic_v4l2_device.h
@@ -5,17 +5,17 @@
 // This file contains the implementation of GenericV4L2Device used on
 // platforms, which provide generic V4L2 video codec devices.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_GENERIC_V4L2_DEVICE_H_
-#define CONTENT_COMMON_GPU_MEDIA_GENERIC_V4L2_DEVICE_H_
+#ifndef MEDIA_GPU_GENERIC_V4L2_DEVICE_H_
+#define MEDIA_GPU_GENERIC_V4L2_DEVICE_H_
 
 #include <stddef.h>
 #include <stdint.h>
 
 #include "base/files/scoped_file.h"
 #include "base/macros.h"
-#include "content/common/gpu/media/v4l2_device.h"
+#include "media/gpu/v4l2_device.h"
 
-namespace content {
+namespace media {
 
 class GenericV4L2Device : public V4L2Device {
  public:
@@ -73,6 +73,6 @@
   // init failure.
   static bool PostSandboxInitialization();
 };
-}  //  namespace content
+}  //  namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_GENERIC_V4L2_DEVICE_H_
+#endif  // MEDIA_GPU_GENERIC_V4L2_DEVICE_H_
diff --git a/media/gpu/ipc/common/gpu_video_accelerator_util.cc b/media/gpu/gpu_video_accelerator_util.cc
similarity index 98%
rename from media/gpu/ipc/common/gpu_video_accelerator_util.cc
rename to media/gpu/gpu_video_accelerator_util.cc
index 9830a8d..1dca1ddc 100644
--- a/media/gpu/ipc/common/gpu_video_accelerator_util.cc
+++ b/media/gpu/gpu_video_accelerator_util.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+#include "media/gpu/gpu_video_accelerator_util.h"
 
 namespace media {
 
diff --git a/media/gpu/ipc/common/gpu_video_accelerator_util.h b/media/gpu/gpu_video_accelerator_util.h
similarity index 90%
rename from media/gpu/ipc/common/gpu_video_accelerator_util.h
rename to media/gpu/gpu_video_accelerator_util.h
index 1d940946..8cf8c12b 100644
--- a/media/gpu/ipc/common/gpu_video_accelerator_util.h
+++ b/media/gpu/gpu_video_accelerator_util.h
@@ -2,18 +2,19 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef MEDIA_GPU_IPC_COMMON_GPU_VIDEO_ACCELERATOR_UTIL_H_
-#define MEDIA_GPU_IPC_COMMON_GPU_VIDEO_ACCELERATOR_UTIL_H_
+#ifndef MEDIA_GPU_GPU_VIDEO_ACCELERATOR_UTIL_H_
+#define MEDIA_GPU_GPU_VIDEO_ACCELERATOR_UTIL_H_
 
 #include <vector>
 
 #include "gpu/config/gpu_info.h"
+#include "media/gpu/media_gpu_export.h"
 #include "media/video/video_decode_accelerator.h"
 #include "media/video/video_encode_accelerator.h"
 
 namespace media {
 
-class GpuVideoAcceleratorUtil {
+class MEDIA_GPU_EXPORT GpuVideoAcceleratorUtil {
  public:
   // Convert decoder gpu capabilities to media capabilities.
   static VideoDecodeAccelerator::Capabilities
@@ -60,4 +61,4 @@
 
 }  // namespace media
 
-#endif  // MEDIA_GPU_IPC_COMMON_GPU_VIDEO_ACCELERATOR_UTIL_H_
+#endif  // MEDIA_GPU_GPU_VIDEO_ACCELERATOR_UTIL_H_
diff --git a/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc b/media/gpu/gpu_video_decode_accelerator_factory_impl.cc
similarity index 84%
rename from content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc
rename to media/gpu/gpu_video_decode_accelerator_factory_impl.cc
index e4f3a35..dee5b6a 100644
--- a/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc
+++ b/media/gpu/gpu_video_decode_accelerator_factory_impl.cc
@@ -2,37 +2,37 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
+#include "media/gpu/gpu_video_decode_accelerator_factory_impl.h"
 
 #include "base/memory/ptr_util.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
 #include "gpu/command_buffer/service/gpu_preferences.h"
-#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+#include "media/gpu/gpu_video_accelerator_util.h"
+#include "media/gpu/media_gpu_export.h"
 
 #if defined(OS_WIN)
 #include "base/win/windows_version.h"
-#include "content/common/gpu/media/dxva_video_decode_accelerator_win.h"
+#include "media/gpu/dxva_video_decode_accelerator_win.h"
 #elif defined(OS_MACOSX)
-#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
+#include "media/gpu/vt_video_decode_accelerator_mac.h"
 #elif defined(OS_CHROMEOS)
 #if defined(USE_V4L2_CODEC)
-#include "content/common/gpu/media/v4l2_device.h"
-#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
-#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
+#include "media/gpu/v4l2_device.h"
+#include "media/gpu/v4l2_slice_video_decode_accelerator.h"
+#include "media/gpu/v4l2_video_decode_accelerator.h"
 #include "ui/gl/gl_surface_egl.h"
 #endif
 #if defined(ARCH_CPU_X86_FAMILY)
-#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
+#include "media/gpu/vaapi_video_decode_accelerator.h"
 #include "ui/gl/gl_implementation.h"
 #endif
 #elif defined(OS_ANDROID)
-#include "content/common/gpu/media/android_video_decode_accelerator.h"
+#include "media/gpu/android_video_decode_accelerator.h"
 #endif
 
-namespace content {
+namespace media {
 
 // static
-std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
+MEDIA_GPU_EXPORT std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
 GpuVideoDecodeAcceleratorFactoryImpl::Create(
     const GetGLContextCallback& get_gl_context_cb,
     const MakeGLContextCurrentCallback& make_context_current_cb,
@@ -43,7 +43,7 @@
 }
 
 // static
-std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
+MEDIA_GPU_EXPORT std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
 GpuVideoDecodeAcceleratorFactoryImpl::CreateWithGLES2Decoder(
     const GetGLContextCallback& get_gl_context_cb,
     const MakeGLContextCurrentCallback& make_context_current_cb,
@@ -55,27 +55,27 @@
 }
 
 // static
-std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
+MEDIA_GPU_EXPORT std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
 GpuVideoDecodeAcceleratorFactoryImpl::CreateWithNoGL() {
   return Create(GetGLContextCallback(), MakeGLContextCurrentCallback(),
                 BindGLImageCallback());
 }
 
 // static
-gpu::VideoDecodeAcceleratorCapabilities
+MEDIA_GPU_EXPORT gpu::VideoDecodeAcceleratorCapabilities
 GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
     const gpu::GpuPreferences& gpu_preferences) {
   media::VideoDecodeAccelerator::Capabilities capabilities;
   if (gpu_preferences.disable_accelerated_video_decode)
     return gpu::VideoDecodeAcceleratorCapabilities();
 
-  // Query VDAs for their capabilities and construct a set of supported
-  // profiles for current platform. This must be done in the same order as in
-  // CreateVDA(), as we currently preserve additional capabilities (such as
-  // resolutions supported) only for the first VDA supporting the given codec
-  // profile (instead of calculating a superset).
-  // TODO(posciak,henryhsu): improve this so that we choose a superset of
-  // resolutions and other supported profile parameters.
+// Query VDAs for their capabilities and construct a set of supported
+// profiles for current platform. This must be done in the same order as in
+// CreateVDA(), as we currently preserve additional capabilities (such as
+// resolutions supported) only for the first VDA supporting the given codec
+// profile (instead of calculating a superset).
+// TODO(posciak,henryhsu): improve this so that we choose a superset of
+// resolutions and other supported profile parameters.
 #if defined(OS_WIN)
   capabilities.supported_profiles =
       DXVAVideoDecodeAccelerator::GetSupportedProfiles();
@@ -105,7 +105,7 @@
       capabilities);
 }
 
-std::unique_ptr<media::VideoDecodeAccelerator>
+MEDIA_GPU_EXPORT std::unique_ptr<media::VideoDecodeAccelerator>
 GpuVideoDecodeAcceleratorFactoryImpl::CreateVDA(
     media::VideoDecodeAccelerator::Client* client,
     const media::VideoDecodeAccelerator::Config& config,
@@ -240,4 +240,4 @@
 
 GpuVideoDecodeAcceleratorFactoryImpl::~GpuVideoDecodeAcceleratorFactoryImpl() {}
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h b/media/gpu/gpu_video_decode_accelerator_factory_impl.h
similarity index 91%
rename from content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h
rename to media/gpu/gpu_video_decode_accelerator_factory_impl.h
index ea4c58c..a005a1c 100644
--- a/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h
+++ b/media/gpu/gpu_video_decode_accelerator_factory_impl.h
@@ -2,16 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
-#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
+#ifndef MEDIA_GPU_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
+#define MEDIA_GPU_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
 
 #include <memory>
 
 #include "base/callback.h"
 #include "base/threading/thread_checker.h"
-#include "content/common/content_export.h"
 #include "gpu/command_buffer/service/gpu_preferences.h"
 #include "gpu/config/gpu_info.h"
+#include "media/gpu/media_gpu_export.h"
 #include "media/video/video_decode_accelerator.h"
 
 namespace gfx {
@@ -30,13 +30,13 @@
 }
 }
 
-namespace content {
+namespace media {
 
 // TODO(posciak): this class should be an implementation of
 // content::GpuVideoDecodeAcceleratorFactory, however that can only be achieved
 // once this is moved out of content/common, see crbug.com/597150 and related.
-class CONTENT_EXPORT GpuVideoDecodeAcceleratorFactoryImpl {
-public:
+class MEDIA_GPU_EXPORT GpuVideoDecodeAcceleratorFactoryImpl {
+ public:
   ~GpuVideoDecodeAcceleratorFactoryImpl();
 
   // Return current GLContext.
@@ -122,6 +122,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(GpuVideoDecodeAcceleratorFactoryImpl);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
+#endif  // MEDIA_GPU_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
diff --git a/content/common/gpu/media/gpu_video_decode_accelerator_helpers.h b/media/gpu/gpu_video_decode_accelerator_helpers.h
similarity index 86%
rename from content/common/gpu/media/gpu_video_decode_accelerator_helpers.h
rename to media/gpu/gpu_video_decode_accelerator_helpers.h
index 1717f592..9d3fda58 100644
--- a/content/common/gpu/media/gpu_video_decode_accelerator_helpers.h
+++ b/media/gpu/gpu_video_decode_accelerator_helpers.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
-#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
+#ifndef MEDIA_GPU_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
+#define MEDIA_GPU_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
 
 #include "base/callback.h"
 #include "base/memory/weak_ptr.h"
@@ -22,7 +22,7 @@
 }
 }
 
-namespace content {
+namespace media {
 
 // Helpers/defines for specific VideoDecodeAccelerator implementations in GPU
 // process. Which callbacks are required depends on the implementation.
@@ -54,6 +54,6 @@
 using GetGLES2DecoderCallback =
     base::Callback<base::WeakPtr<gpu::gles2::GLES2Decoder>(void)>;
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
+#endif  // MEDIA_GPU_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
diff --git a/content/common/gpu/media/h264_decoder.cc b/media/gpu/h264_decoder.cc
similarity index 96%
rename from content/common/gpu/media/h264_decoder.cc
rename to media/gpu/h264_decoder.cc
index f8504f3..8bb39d8 100644
--- a/content/common/gpu/media/h264_decoder.cc
+++ b/media/gpu/h264_decoder.cc
@@ -11,15 +11,13 @@
 #include "base/macros.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/stl_util.h"
-#include "content/common/gpu/media/h264_decoder.h"
+#include "media/gpu/h264_decoder.h"
 
-namespace content {
+namespace media {
 
-H264Decoder::H264Accelerator::H264Accelerator() {
-}
+H264Decoder::H264Accelerator::H264Accelerator() {}
 
-H264Decoder::H264Accelerator::~H264Accelerator() {
-}
+H264Decoder::H264Accelerator::~H264Accelerator() {}
 
 H264Decoder::H264Decoder(H264Accelerator* accelerator)
     : max_frame_num_(0),
@@ -32,8 +30,7 @@
   state_ = kNeedStreamMetadata;
 }
 
-H264Decoder::~H264Decoder() {
-}
+H264Decoder::~H264Decoder() {}
 
 void H264Decoder::Reset() {
   curr_pic_ = nullptr;
@@ -174,7 +171,7 @@
   // purpose.
   if (slice_hdr->adaptive_ref_pic_marking_mode_flag) {
     static_assert(sizeof(curr_pic_->ref_pic_marking) ==
-                  sizeof(slice_hdr->ref_pic_marking),
+                      sizeof(slice_hdr->ref_pic_marking),
                   "Array sizes of ref pic marking do not match.");
     memcpy(curr_pic_->ref_pic_marking, slice_hdr->ref_pic_marking,
            sizeof(curr_pic_->ref_pic_marking));
@@ -271,13 +268,13 @@
           return false;
         }
 
-        int pic_order_cnt_cycle_cnt = (abs_frame_num - 1) /
-            sps->num_ref_frames_in_pic_order_cnt_cycle;
-        int frame_num_in_pic_order_cnt_cycle = (abs_frame_num - 1) %
-            sps->num_ref_frames_in_pic_order_cnt_cycle;
+        int pic_order_cnt_cycle_cnt =
+            (abs_frame_num - 1) / sps->num_ref_frames_in_pic_order_cnt_cycle;
+        int frame_num_in_pic_order_cnt_cycle =
+            (abs_frame_num - 1) % sps->num_ref_frames_in_pic_order_cnt_cycle;
 
         expected_pic_order_cnt = pic_order_cnt_cycle_cnt *
-            sps->expected_delta_per_pic_order_cnt_cycle;
+                                 sps->expected_delta_per_pic_order_cnt_cycle;
         // frame_num_in_pic_order_cnt_cycle is verified < 255 in parser
         for (int i = 0; i <= frame_num_in_pic_order_cnt_cycle; ++i)
           expected_pic_order_cnt += sps->offset_for_ref_frame[i];
@@ -537,14 +534,12 @@
   if (list == 0) {
     ref_pic_list_modification_flag_lX =
         slice_hdr->ref_pic_list_modification_flag_l0;
-    num_ref_idx_lX_active_minus1 =
-        slice_hdr->num_ref_idx_l0_active_minus1;
+    num_ref_idx_lX_active_minus1 = slice_hdr->num_ref_idx_l0_active_minus1;
     list_mod = slice_hdr->ref_list_l0_modifications;
   } else {
     ref_pic_list_modification_flag_lX =
         slice_hdr->ref_pic_list_modification_flag_l1;
-    num_ref_idx_lX_active_minus1 =
-        slice_hdr->num_ref_idx_l1_active_minus1;
+    num_ref_idx_lX_active_minus1 = slice_hdr->num_ref_idx_l1_active_minus1;
     list_mod = slice_hdr->ref_list_l1_modifications;
   }
 
@@ -574,7 +569,8 @@
         // Modify short reference picture position.
         if (list_mod->modification_of_pic_nums_idc == 0) {
           // Subtract given value from predicted PicNum.
-          pic_num_lx_no_wrap = pic_num_lx_pred -
+          pic_num_lx_no_wrap =
+              pic_num_lx_pred -
               (static_cast<int>(list_mod->abs_diff_pic_num_minus1) + 1);
           // Wrap around max_pic_num_ if it becomes < 0 as result
           // of subtraction.
@@ -582,7 +578,8 @@
             pic_num_lx_no_wrap += max_pic_num_;
         } else {
           // Add given value to predicted PicNum.
-          pic_num_lx_no_wrap = pic_num_lx_pred +
+          pic_num_lx_no_wrap =
+              pic_num_lx_pred +
               (static_cast<int>(list_mod->abs_diff_pic_num_minus1) + 1);
           // Wrap around max_pic_num_ if it becomes >= max_pic_num_ as result
           // of the addition.
@@ -646,8 +643,8 @@
       default:
         // May be recoverable.
         DVLOG(1) << "Invalid modification_of_pic_nums_idc="
-                 << list_mod->modification_of_pic_nums_idc
-                 << " in position " << i;
+                 << list_mod->modification_of_pic_nums_idc << " in position "
+                 << i;
         break;
     }
 
@@ -672,8 +669,8 @@
   }
 
   DVLOG_IF(1, pic->pic_order_cnt < last_output_poc_)
-      << "Outputting out of order, likely a broken stream: "
-      << last_output_poc_ << " -> " << pic->pic_order_cnt;
+      << "Outputting out of order, likely a broken stream: " << last_output_poc_
+      << " -> " << pic->pic_order_cnt;
   last_output_poc_ = pic->pic_order_cnt;
 
   DVLOG(4) << "Posting output task for POC: " << pic->pic_order_cnt;
@@ -1007,22 +1004,33 @@
 static int LevelToMaxDpbMbs(int level) {
   // See table A-1 in spec.
   switch (level) {
-    case 10: return 396;
-    case 11: return 900;
-    case 12: //  fallthrough
-    case 13: //  fallthrough
-    case 20: return 2376;
-    case 21: return 4752;
-    case 22: //  fallthrough
-    case 30: return 8100;
-    case 31: return 18000;
-    case 32: return 20480;
-    case 40: //  fallthrough
-    case 41: return 32768;
-    case 42: return 34816;
-    case 50: return 110400;
-    case 51: //  fallthrough
-    case 52: return 184320;
+    case 10:
+      return 396;
+    case 11:
+      return 900;
+    case 12:  //  fallthrough
+    case 13:  //  fallthrough
+    case 20:
+      return 2376;
+    case 21:
+      return 4752;
+    case 22:  //  fallthrough
+    case 30:
+      return 8100;
+    case 31:
+      return 18000;
+    case 32:
+      return 20480;
+    case 40:  //  fallthrough
+    case 41:
+      return 32768;
+    case 42:
+      return 34816;
+    case 50:
+      return 110400;
+    case 51:  //  fallthrough
+    case 52:
+      return 184320;
     default:
       DVLOG(1) << "Invalid codec level (" << level << ")";
       return 0;
@@ -1311,7 +1319,7 @@
         // We can't resume from a non-IDR slice.
         if (state_ != kDecoding)
           break;
-        // else fallthrough
+      // else fallthrough
       case media::H264NALU::kIDRSlice: {
         // TODO(posciak): the IDR may require an SPS that we don't have
         // available. For now we'd fail if that happens, but ideally we'd like
@@ -1427,4 +1435,4 @@
   return dpb_.max_num_pics() + kPicsInPipeline;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/h264_decoder.h b/media/gpu/h264_decoder.h
similarity index 95%
rename from content/common/gpu/media/h264_decoder.h
rename to media/gpu/h264_decoder.h
index 51c4f7d..fc0e6f8 100644
--- a/content/common/gpu/media/h264_decoder.h
+++ b/media/gpu/h264_decoder.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_H264_DECODER_H_
-#define CONTENT_COMMON_GPU_MEDIA_H264_DECODER_H_
+#ifndef MEDIA_GPU_H264_DECODER_H_
+#define MEDIA_GPU_H264_DECODER_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -13,14 +13,14 @@
 
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/accelerated_video_decoder.h"
-#include "content/common/gpu/media/h264_dpb.h"
 #include "media/base/limits.h"
 #include "media/filters/h264_parser.h"
+#include "media/gpu/accelerated_video_decoder.h"
+#include "media/gpu/h264_dpb.h"
+#include "media/gpu/media_gpu_export.h"
 #include "ui/gfx/geometry/size.h"
 
-namespace content {
+namespace media {
 
 // Clients of this class are expected to pass H264 Annex-B byte stream
 // and are expected to provide an implementation of H264Accelerator for
@@ -28,9 +28,9 @@
 //
 // This class must be created, called and destroyed on a single thread, and
 // does nothing internally on any other thread.
-class CONTENT_EXPORT H264Decoder : public AcceleratedVideoDecoder {
+class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
  public:
-  class CONTENT_EXPORT H264Accelerator {
+  class MEDIA_GPU_EXPORT H264Accelerator {
    public:
     H264Accelerator();
     virtual ~H264Accelerator();
@@ -103,7 +103,7 @@
   H264Decoder(H264Accelerator* accelerator);
   ~H264Decoder() override;
 
-  // content::AcceleratedVideoDecoder implementation.
+  // media::AcceleratedVideoDecoder implementation.
   bool Flush() override WARN_UNUSED_RESULT;
   void Reset() override;
   void SetStream(const uint8_t* ptr, size_t size) override;
@@ -275,6 +275,6 @@
   DISALLOW_COPY_AND_ASSIGN(H264Decoder);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_H264_DECODER_H_
+#endif  // MEDIA_GPU_H264_DECODER_H_
diff --git a/content/common/gpu/media/h264_dpb.cc b/media/gpu/h264_dpb.cc
similarity index 94%
rename from content/common/gpu/media/h264_dpb.cc
rename to media/gpu/h264_dpb.cc
index 56bdd8d..ea1c4db8 100644
--- a/content/common/gpu/media/h264_dpb.cc
+++ b/media/gpu/h264_dpb.cc
@@ -8,9 +8,9 @@
 
 #include "base/logging.h"
 #include "base/stl_util.h"
-#include "content/common/gpu/media/h264_dpb.h"
+#include "media/gpu/h264_dpb.h"
 
-namespace content {
+namespace media {
 
 H264Picture::H264Picture()
     : pic_order_cnt_type(0),
@@ -44,8 +44,7 @@
   memset(&ref_pic_marking, 0, sizeof(ref_pic_marking));
 }
 
-H264Picture::~H264Picture() {
-}
+H264Picture::~H264Picture() {}
 
 V4L2H264Picture* H264Picture::AsV4L2H264Picture() {
   return nullptr;
@@ -78,8 +77,8 @@
 }
 
 void H264DPB::DeleteByPOC(int poc) {
-  for (H264Picture::Vector::iterator it = pics_.begin();
-       it != pics_.end(); ++it) {
+  for (H264Picture::Vector::iterator it = pics_.begin(); it != pics_.end();
+       ++it) {
     if ((*it)->pic_order_cnt == poc) {
       pics_.erase(it);
       UpdatePicPositions();
@@ -90,7 +89,7 @@
 }
 
 void H264DPB::DeleteUnused() {
-  for (H264Picture::Vector::iterator it = pics_.begin(); it != pics_.end(); ) {
+  for (H264Picture::Vector::iterator it = pics_.begin(); it != pics_.end();) {
     if ((*it)->outputted && !(*it)->ref)
       it = pics_.erase(it);
     else
@@ -172,4 +171,4 @@
   }
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/h264_dpb.h b/media/gpu/h264_dpb.h
similarity index 94%
rename from content/common/gpu/media/h264_dpb.h
rename to media/gpu/h264_dpb.h
index 9c67538..7e473de 100644
--- a/content/common/gpu/media/h264_dpb.h
+++ b/media/gpu/h264_dpb.h
@@ -5,8 +5,8 @@
 // This file contains an implementation of an H.264 Decoded Picture Buffer
 // used in H264 decoders.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_H264_DPB_H_
-#define CONTENT_COMMON_GPU_MEDIA_H264_DPB_H_
+#ifndef MEDIA_GPU_H264_DPB_H_
+#define MEDIA_GPU_H264_DPB_H_
 
 #include <stddef.h>
 
@@ -16,7 +16,7 @@
 #include "base/memory/ref_counted.h"
 #include "media/filters/h264_parser.h"
 
-namespace content {
+namespace media {
 
 class V4L2H264Picture;
 class VaapiH264Picture;
@@ -60,9 +60,9 @@
 
   media::H264SliceHeader::Type type;
   int nal_ref_idc;
-  bool idr;  // IDR picture?
+  bool idr;        // IDR picture?
   int idr_pic_id;  // Valid only if idr == true.
-  bool ref;  // reference picture?
+  bool ref;        // reference picture?
   bool long_term;  // long term reference picture?
   bool outputted;
   // Does memory management op 5 needs to be executed after this
@@ -160,7 +160,9 @@
   bool IsFull() const { return pics_.size() == max_num_pics_; }
 
   // Per H264 spec, increase to 32 if interlaced video is supported.
-  enum { kDPBMaxSize = 16, };
+  enum {
+    kDPBMaxSize = 16,
+  };
 
  private:
   void UpdatePicPositions();
@@ -171,6 +173,6 @@
   DISALLOW_COPY_AND_ASSIGN(H264DPB);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_H264_DPB_H_
+#endif  // MEDIA_GPU_H264_DPB_H_
diff --git a/media/gpu/ipc/client/BUILD.gn b/media/gpu/ipc/client/BUILD.gn
index a14cdef..53ec731c 100644
--- a/media/gpu/ipc/client/BUILD.gn
+++ b/media/gpu/ipc/client/BUILD.gn
@@ -20,6 +20,7 @@
     "//ipc",
     "//media",
     "//media:media_features",
+    "//media/gpu",
     "//ui/gfx:memory_buffer",
     "//ui/gfx/geometry",
     "//ui/gfx/ipc",
diff --git a/media/gpu/ipc/client/gpu_video_encode_accelerator_host.cc b/media/gpu/ipc/client/gpu_video_encode_accelerator_host.cc
index 8a8ea08..0d3cae9 100644
--- a/media/gpu/ipc/client/gpu_video_encode_accelerator_host.cc
+++ b/media/gpu/ipc/client/gpu_video_encode_accelerator_host.cc
@@ -9,7 +9,7 @@
 #include "base/thread_task_runner_handle.h"
 #include "gpu/ipc/client/gpu_channel_host.h"
 #include "media/base/video_frame.h"
-#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+#include "media/gpu/gpu_video_accelerator_util.h"
 #include "media/gpu/ipc/common/media_messages.h"
 #include "media/video/video_encode_accelerator.h"
 #include "ui/gfx/gpu_memory_buffer.h"
diff --git a/media/gpu/ipc/common/BUILD.gn b/media/gpu/ipc/common/BUILD.gn
index 6ada079..47479f9 100644
--- a/media/gpu/ipc/common/BUILD.gn
+++ b/media/gpu/ipc/common/BUILD.gn
@@ -6,8 +6,6 @@
   sources = [
     "create_video_encoder_params.cc",
     "create_video_encoder_params.h",
-    "gpu_video_accelerator_util.cc",
-    "gpu_video_accelerator_util.h",
     "media_message_generator.cc",
     "media_message_generator.h",
     "media_messages.cc",
diff --git a/media/gpu/ipc/media_ipc.gyp b/media/gpu/ipc/media_ipc.gyp
index 3c4c1c3..fc81008be 100644
--- a/media/gpu/ipc/media_ipc.gyp
+++ b/media/gpu/ipc/media_ipc.gyp
@@ -16,14 +16,13 @@
         '../../../ipc/ipc.gyp:ipc',
         '../../../ui/gfx/gfx.gyp:gfx',
         '../../../ui/gfx/gfx.gyp:gfx_geometry',
+        '../../../ui/gfx/ipc/geometry/gfx_ipc_geometry.gyp:gfx_ipc_geometry',
         '../../../ui/gfx/ipc/gfx_ipc.gyp:gfx_ipc',
       ],
       # This sources list is duplicated in //media/gpu/ipc/common/BUILD.gn
       'sources': [
         'common/create_video_encoder_params.cc',
         'common/create_video_encoder_params.h',
-        'common/gpu_video_accelerator_util.cc',
-        'common/gpu_video_accelerator_util.h',
         'common/media_message_generator.cc',
         'common/media_message_generator.h',
         'common/media_messages.cc',
@@ -40,6 +39,7 @@
       'dependencies': [
         '../../media.gyp:media',
         '../../media.gyp:media_features',
+        '../../media.gyp:media_gpu',
         '../../../base/base.gyp:base',
         '../../../gpu/gpu.gyp:gpu_ipc_common',
         '../../../ipc/ipc.gyp:ipc',
@@ -57,7 +57,49 @@
         'client/gpu_video_encode_accelerator_host.h',
       ],
       # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
-      'msvs_disabled_warnings': [4267, ],
+      'msvs_disabled_warnings': [ 4267 ],
     },
+    {
+      # GN version: //media/gpu/ipc/service
+      'target_name': 'media_gpu_ipc_service',
+      'type': 'static_library',
+      'dependencies': [
+        '../../../base/base.gyp:base',
+        '../../../gpu/gpu.gyp:gpu_config',
+        '../../../gpu/gpu.gyp:gpu_ipc_service',
+        '../../../ipc/ipc.gyp:ipc',
+        '../../../third_party/mesa/mesa.gyp:mesa_headers',
+        '../../media.gyp:media',
+        '../../media.gyp:media_gpu',
+        'media_gpu_ipc_common',
+      ],
+      'sources': [
+        'service/gpu_jpeg_decode_accelerator.cc',
+        'service/gpu_jpeg_decode_accelerator.h',
+        'service/gpu_video_decode_accelerator.cc',
+        'service/gpu_video_decode_accelerator.h',
+        'service/gpu_video_encode_accelerator.cc',
+        'service/gpu_video_encode_accelerator.h',
+        'service/media_channel.cc',
+        'service/media_channel.h',
+        'service/media_service.cc',
+        'service/media_service.h',
+      ],
+      'include_dirs': [
+        '<(DEPTH)/third_party/libva',
+        '<(DEPTH)/third_party/mesa/src/include',
+      ],
+      'conditions': [
+        ['OS == "win" and target_arch == "x64"', {
+          'msvs_settings': {
+            'VCCLCompilerTool': {
+              'AdditionalOptions': [
+                '/wd4267', # Conversion from 'size_t' to 'type', possible loss of data
+              ],
+            },
+          },
+        }],
+      ],
+    }
   ]
 }
diff --git a/media/gpu/ipc/service/BUILD.gn b/media/gpu/ipc/service/BUILD.gn
new file mode 100644
index 0000000..d8ffd04
--- /dev/null
+++ b/media/gpu/ipc/service/BUILD.gn
@@ -0,0 +1,47 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+source_set("service") {
+  visibility = [
+    "//media/*",
+    "//content/gpu/*",
+    "//content/public/gpu/*",
+  ]
+
+  sources = [
+    "gpu_jpeg_decode_accelerator.cc",
+    "gpu_jpeg_decode_accelerator.h",
+    "gpu_video_decode_accelerator.cc",
+    "gpu_video_decode_accelerator.h",
+    "gpu_video_encode_accelerator.cc",
+    "gpu_video_encode_accelerator.h",
+    "media_channel.cc",
+    "media_channel.h",
+    "media_service.cc",
+    "media_service.h",
+  ]
+
+  include_dirs = [ "//third_party/mesa/src/include" ]
+
+  public_deps = [
+    "//base",
+    "//gpu/config",
+    "//ipc",
+    "//media",
+    "//media/gpu",
+  ]
+  deps = [
+    "//gpu/ipc/service",
+    "//media/gpu/ipc/common",
+    "//third_party/mesa:mesa_headers",
+  ]
+
+  if (is_chromeos && current_cpu != "arm") {
+    configs += [ "//third_party/libva:libva_config" ]
+  }
+
+  if (is_win) {
+    configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+  }
+}
diff --git a/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc b/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc
similarity index 95%
rename from content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
rename to media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc
index 0078317..4dbc35f 100644
--- a/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
+++ b/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
+#include "media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h"
 
 #include <stdint.h>
 
@@ -27,11 +27,11 @@
 
 #if defined(OS_CHROMEOS)
 #if defined(ARCH_CPU_X86_FAMILY)
-#include "content/common/gpu/media/vaapi_jpeg_decode_accelerator.h"
+#include "media/gpu/vaapi_jpeg_decode_accelerator.h"
 #endif
 #if defined(USE_V4L2_CODEC)
-#include "content/common/gpu/media/v4l2_device.h"
-#include "content/common/gpu/media/v4l2_jpeg_decode_accelerator.h"
+#include "media/gpu/v4l2_device.h"
+#include "media/gpu/v4l2_jpeg_decode_accelerator.h"
 #endif
 #endif
 
@@ -70,13 +70,13 @@
 
 }  // namespace
 
-namespace content {
+namespace media {
 
 class GpuJpegDecodeAccelerator::Client
     : public media::JpegDecodeAccelerator::Client,
       public base::NonThreadSafe {
  public:
-  Client(content::GpuJpegDecodeAccelerator* owner, int32_t route_id)
+  Client(media::GpuJpegDecodeAccelerator* owner, int32_t route_id)
       : owner_(owner->AsWeakPtr()), route_id_(route_id) {}
 
   ~Client() override { DCHECK(CalledOnValidThread()); }
@@ -110,7 +110,7 @@
   }
 
  private:
-  base::WeakPtr<content::GpuJpegDecodeAccelerator> owner_;
+  base::WeakPtr<media::GpuJpegDecodeAccelerator> owner_;
   int32_t route_id_;
   std::unique_ptr<media::JpegDecodeAccelerator> accelerator_;
 };
@@ -301,8 +301,7 @@
     : channel_(channel),
       child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
       io_task_runner_(io_task_runner),
-      client_number_(0) {
-}
+      client_number_(0) {}
 
 GpuJpegDecodeAccelerator::~GpuJpegDecodeAccelerator() {
   DCHECK(CalledOnValidThread());
@@ -390,8 +389,8 @@
     const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner) {
   std::unique_ptr<media::JpegDecodeAccelerator> decoder;
 #if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
-  scoped_refptr<V4L2Device> device = V4L2Device::Create(
-      V4L2Device::kJpegDecoder);
+  scoped_refptr<V4L2Device> device =
+      V4L2Device::Create(V4L2Device::kJpegDecoder);
   if (device)
     decoder.reset(new V4L2JpegDecodeAccelerator(device, io_task_runner));
 #endif
@@ -424,4 +423,4 @@
   return false;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/gpu_jpeg_decode_accelerator.h b/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h
similarity index 91%
rename from content/common/gpu/media/gpu_jpeg_decode_accelerator.h
rename to media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h
index a2ac643..0de3033 100644
--- a/content/common/gpu/media/gpu_jpeg_decode_accelerator.h
+++ b/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_JPEG_DECODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_GPU_JPEG_DECODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_IPC_SERVICE_GPU_JPEG_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_IPC_SERVICE_GPU_JPEG_DECODE_ACCELERATOR_H_
 
 #include <stdint.h>
 
@@ -26,7 +26,7 @@
 class GpuChannel;
 }
 
-namespace content {
+namespace media {
 class GpuJpegDecodeAccelerator
     : public IPC::Sender,
       public base::NonThreadSafe,
@@ -85,6 +85,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(GpuJpegDecodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_GPU_JPEG_DECODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_IPC_SERVICE_GPU_JPEG_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/gpu_video_decode_accelerator.cc b/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
similarity index 95%
rename from content/common/gpu/media/gpu_video_decode_accelerator.cc
rename to media/gpu/ipc/service/gpu_video_decode_accelerator.cc
index f35291e4..6e6ba31 100644
--- a/content/common/gpu/media/gpu_video_decode_accelerator.cc
+++ b/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
@@ -2,7 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
+// TODO(markdittmer): This shouldn't be necessary.
+// Ensure that X11 headers don't poison environment before macro headers are
+// loaded.
+//
+// In particular, X11/Xlib.h defines "Status", which is the name of an enum
+// defined in "base/tracked_objects.h", which is eventually required by
+// "ipc/ipc_message_macros.h".
+#include "ipc/ipc_message_macros.h"
+
+#include "media/gpu/ipc/service/gpu_video_decode_accelerator.h"
 
 #include <memory>
 #include <vector>
@@ -15,22 +24,21 @@
 #include "base/stl_util.h"
 #include "base/thread_task_runner_handle.h"
 #include "build/build_config.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
 #include "gpu/command_buffer/common/command_buffer.h"
 #include "gpu/command_buffer/service/gpu_preferences.h"
 #include "gpu/ipc/service/gpu_channel.h"
 #include "gpu/ipc/service/gpu_channel_manager.h"
-#include "ipc/ipc_message_macros.h"
 #include "ipc/ipc_message_utils.h"
 #include "ipc/message_filter.h"
 #include "media/base/limits.h"
-#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+#include "media/gpu/gpu_video_accelerator_util.h"
+#include "media/gpu/gpu_video_decode_accelerator_factory_impl.h"
 #include "media/gpu/ipc/common/media_messages.h"
 #include "ui/gfx/geometry/size.h"
 #include "ui/gl/gl_context.h"
 #include "ui/gl/gl_image.h"
 
-namespace content {
+namespace media {
 
 namespace {
 static gfx::GLContext* GetGLContext(
@@ -251,8 +259,7 @@
   uncleared_textures_.erase(picture_buffer_id);
 }
 
-void GpuVideoDecodeAccelerator::PictureReady(
-    const media::Picture& picture) {
+void GpuVideoDecodeAccelerator::PictureReady(const media::Picture& picture) {
   // VDA may call PictureReady on IO thread. SetTextureCleared should run on
   // the child thread. VDA is responsible to call PictureReady on the child
   // thread when a picture buffer is delivered the first time.
@@ -294,8 +301,8 @@
 
 void GpuVideoDecodeAccelerator::NotifyError(
     media::VideoDecodeAccelerator::Error error) {
-  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
-          host_route_id_, error))) {
+  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(host_route_id_,
+                                                                 error))) {
     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
                 << "failed";
   }
@@ -524,4 +531,4 @@
   uncleared_textures_.erase(it);
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/gpu_video_decode_accelerator.h b/media/gpu/ipc/service/gpu_video_decode_accelerator.h
similarity index 93%
rename from content/common/gpu/media/gpu_video_decode_accelerator.h
rename to media/gpu/ipc/service/gpu_video_decode_accelerator.h
index 2f338d8..0201c6e1 100644
--- a/content/common/gpu/media/gpu_video_decode_accelerator.h
+++ b/media/gpu/ipc/service/gpu_video_decode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_IPC_SERVICE_GPU_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_IPC_SERVICE_GPU_VIDEO_DECODE_ACCELERATOR_H_
 
 #include <stdint.h>
 
@@ -16,13 +16,12 @@
 #include "base/memory/ref_counted.h"
 #include "base/memory/shared_memory.h"
 #include "base/synchronization/waitable_event.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
 #include "gpu/command_buffer/service/texture_manager.h"
 #include "gpu/config/gpu_info.h"
 #include "gpu/ipc/service/gpu_command_buffer_stub.h"
-#include "gpu/ipc/service/gpu_command_buffer_stub.h"
 #include "ipc/ipc_listener.h"
 #include "ipc/ipc_sender.h"
+#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
 #include "media/video/video_decode_accelerator.h"
 #include "ui/gfx/geometry/size.h"
 
@@ -30,7 +29,7 @@
 struct GpuPreferences;
 }  // namespace gpu
 
-namespace content {
+namespace media {
 
 class GpuVideoDecodeAccelerator
     : public IPC::Listener,
@@ -165,6 +164,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(GpuVideoDecodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_IPC_SERVICE_GPU_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/gpu_video_encode_accelerator.cc b/media/gpu/ipc/service/gpu_video_encode_accelerator.cc
similarity index 86%
rename from content/common/gpu/media/gpu_video_encode_accelerator.cc
rename to media/gpu/ipc/service/gpu_video_encode_accelerator.cc
index 6381658..3287d7b 100644
--- a/content/common/gpu/media/gpu_video_encode_accelerator.cc
+++ b/media/gpu/ipc/service/gpu_video_encode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
+#include "media/gpu/ipc/service/gpu_video_encode_accelerator.h"
 
 #include <memory>
 
@@ -21,23 +21,23 @@
 #include "media/base/bind_to_current_loop.h"
 #include "media/base/limits.h"
 #include "media/base/video_frame.h"
-#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+#include "media/gpu/gpu_video_accelerator_util.h"
 #include "media/gpu/ipc/common/media_messages.h"
 
 #if defined(OS_CHROMEOS)
 #if defined(USE_V4L2_CODEC)
-#include "content/common/gpu/media/v4l2_video_encode_accelerator.h"
+#include "media/gpu/v4l2_video_encode_accelerator.h"
 #endif
 #if defined(ARCH_CPU_X86_FAMILY)
-#include "content/common/gpu/media/vaapi_video_encode_accelerator.h"
+#include "media/gpu/vaapi_video_encode_accelerator.h"
 #endif
 #elif defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
-#include "content/common/gpu/media/android_video_encode_accelerator.h"
+#include "media/gpu/android_video_encode_accelerator.h"
 #elif defined(OS_MACOSX)
-#include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
+#include "media/gpu/vt_video_encode_accelerator_mac.h"
 #endif
 
-namespace content {
+namespace media {
 
 static bool MakeDecoderContextCurrent(
     const base::WeakPtr<gpu::GpuCommandBufferStub> stub) {
@@ -79,7 +79,8 @@
     media::VideoCodecProfile output_profile,
     uint32_t initial_bitrate) {
   DVLOG(2) << "GpuVideoEncodeAccelerator::Initialize(): "
-              "input_format=" << input_format
+              "input_format="
+           << input_format
            << ", input_visible_size=" << input_visible_size.ToString()
            << ", output_profile=" << output_profile
            << ", initial_bitrate=" << initial_bitrate;
@@ -95,8 +96,8 @@
       input_visible_size.height() > media::limits::kMaxDimension ||
       input_visible_size.GetArea() > media::limits::kMaxCanvas) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::Initialize(): "
-                   "input_visible_size " << input_visible_size.ToString()
-                << " too large";
+                   "input_visible_size "
+                << input_visible_size.ToString() << " too large";
     return false;
   }
 
@@ -108,11 +109,9 @@
   // Try all possible encoders and use the first successful encoder.
   for (size_t i = 0; i < create_vea_fps.size(); ++i) {
     encoder_ = (*create_vea_fps[i])();
-    if (encoder_ && encoder_->Initialize(input_format,
-                                         input_visible_size,
-                                         output_profile,
-                                         initial_bitrate,
-                                         this)) {
+    if (encoder_ &&
+        encoder_->Initialize(input_format, input_visible_size, output_profile,
+                             initial_bitrate, this)) {
       input_format_ = input_format;
       input_visible_size_ = input_visible_size;
       return true;
@@ -271,7 +270,8 @@
 
   if (params.frame_id < 0) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): invalid "
-                   "frame_id=" << params.frame_id;
+                   "frame_id="
+                << params.frame_id;
     NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
     return;
   }
@@ -301,27 +301,18 @@
       reinterpret_cast<uint8_t*>(shm->memory()) + aligned_offset;
   scoped_refptr<media::VideoFrame> frame =
       media::VideoFrame::WrapExternalSharedMemory(
-          input_format_,
-          input_coded_size_,
-          gfx::Rect(input_visible_size_),
-          input_visible_size_,
-          shm_memory,
-          params.buffer_size,
-          params.buffer_handle,
-          params.buffer_offset,
-          params.timestamp);
+          input_format_, input_coded_size_, gfx::Rect(input_visible_size_),
+          input_visible_size_, shm_memory, params.buffer_size,
+          params.buffer_handle, params.buffer_offset, params.timestamp);
   if (!frame) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): "
                 << "could not create a frame";
     NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
     return;
   }
-  frame->AddDestructionObserver(
-      media::BindToCurrentLoop(
-          base::Bind(&GpuVideoEncodeAccelerator::EncodeFrameFinished,
-                     weak_this_factory_.GetWeakPtr(),
-                     params.frame_id,
-                     base::Passed(&shm))));
+  frame->AddDestructionObserver(media::BindToCurrentLoop(base::Bind(
+      &GpuVideoEncodeAccelerator::EncodeFrameFinished,
+      weak_this_factory_.GetWeakPtr(), params.frame_id, base::Passed(&shm))));
   encoder_->Encode(frame, params.force_keyframe);
 }
 
@@ -329,8 +320,8 @@
     const AcceleratedVideoEncoderMsg_Encode_Params2& params) {
   DVLOG(3) << "GpuVideoEncodeAccelerator::OnEncode2: frame_id = "
            << params.frame_id << ", size=" << params.size.ToString()
-           << ", force_keyframe=" << params.force_keyframe << ", handle type="
-           << params.gpu_memory_buffer_handles[0].type;
+           << ", force_keyframe=" << params.force_keyframe
+           << ", handle type=" << params.gpu_memory_buffer_handles[0].type;
   // Encoding GpuMemoryBuffer backed frames is not supported.
   NOTREACHED();
 }
@@ -340,19 +331,21 @@
     base::SharedMemoryHandle buffer_handle,
     uint32_t buffer_size) {
   DVLOG(3) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
-              "buffer_id=" << buffer_id
-           << ", buffer_size=" << buffer_size;
+              "buffer_id="
+           << buffer_id << ", buffer_size=" << buffer_size;
   if (!encoder_)
     return;
   if (buffer_id < 0) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
-                   "invalid buffer_id=" << buffer_id;
+                   "invalid buffer_id="
+                << buffer_id;
     NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
     return;
   }
   if (buffer_size < output_buffer_size_) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
-                   "buffer too small for buffer_id=" << buffer_id;
+                   "buffer too small for buffer_id="
+                << buffer_id;
     NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
     return;
   }
@@ -369,8 +362,8 @@
     uint32_t bitrate,
     uint32_t framerate) {
   DVLOG(2) << "GpuVideoEncodeAccelerator::OnRequestEncodingParametersChange(): "
-              "bitrate=" << bitrate
-           << ", framerate=" << framerate;
+              "bitrate="
+           << bitrate << ", framerate=" << framerate;
   if (!encoder_)
     return;
   encoder_->RequestEncodingParametersChange(bitrate, framerate);
@@ -388,4 +381,4 @@
   stub_->channel()->Send(message);
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/gpu_video_encode_accelerator.h b/media/gpu/ipc/service/gpu_video_encode_accelerator.h
similarity index 94%
rename from content/common/gpu/media/gpu_video_encode_accelerator.h
rename to media/gpu/ipc/service/gpu_video_encode_accelerator.h
index 738f764..cfe44d2 100644
--- a/content/common/gpu/media/gpu_video_encode_accelerator.h
+++ b/media/gpu/ipc/service/gpu_video_encode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ENCODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ENCODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_IPC_SERVICE_GPU_VIDEO_ENCODE_ACCELERATOR_H_
+#define MEDIA_GPU_IPC_SERVICE_GPU_VIDEO_ENCODE_ACCELERATOR_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -30,7 +30,7 @@
 struct GpuPreferences;
 }  // namespace gpu
 
-namespace content {
+namespace media {
 
 // This class encapsulates the GPU process view of a VideoEncodeAccelerator,
 // wrapping the platform-specific VideoEncodeAccelerator instance.  It handles
@@ -131,6 +131,6 @@
   DISALLOW_COPY_AND_ASSIGN(GpuVideoEncodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ENCODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_IPC_SERVICE_GPU_VIDEO_ENCODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/media_channel.cc b/media/gpu/ipc/service/media_channel.cc
similarity index 95%
rename from content/common/gpu/media/media_channel.cc
rename to media/gpu/ipc/service/media_channel.cc
index e23e296..f4bb4de 100644
--- a/content/common/gpu/media/media_channel.cc
+++ b/media/gpu/ipc/service/media_channel.cc
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/media_channel.h"
+#include "media/gpu/ipc/service/media_channel.h"
 
-#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
-#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
 #include "gpu/ipc/service/gpu_channel.h"
 #include "media/gpu/ipc/common/media_messages.h"
+#include "media/gpu/ipc/service/gpu_video_decode_accelerator.h"
+#include "media/gpu/ipc/service/gpu_video_encode_accelerator.h"
 
-namespace content {
+namespace media {
 
 namespace {
 
@@ -142,4 +142,4 @@
   // self-delete during destruction of this stub.
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/media_channel.h b/media/gpu/ipc/service/media_channel.h
similarity index 84%
rename from content/common/gpu/media/media_channel.h
rename to media/gpu/ipc/service/media_channel.h
index 49c1ee7..3c6c1b0d 100644
--- a/content/common/gpu/media/media_channel.h
+++ b/media/gpu/ipc/service/media_channel.h
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_MEDIA_CHANNEL_H_
-#define CONTENT_COMMON_GPU_MEDIA_MEDIA_CHANNEL_H_
+#ifndef MEDIA_GPU_IPC_SERVICE_MEDIA_CHANNEL_H_
+#define MEDIA_GPU_IPC_SERVICE_MEDIA_CHANNEL_H_
 
 #include <memory>
 
-#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
 #include "ipc/ipc_listener.h"
 #include "ipc/ipc_sender.h"
+#include "media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h"
 #include "media/video/video_decode_accelerator.h"
 
 namespace media {
@@ -21,7 +21,7 @@
 class GpuCommandBufferStub;
 }
 
-namespace content {
+namespace media {
 
 class MediaChannelDispatchHelper;
 
@@ -54,6 +54,6 @@
   DISALLOW_COPY_AND_ASSIGN(MediaChannel);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_MEDIA_CHANNEL_H_
+#endif  // MEDIA_GPU_IPC_SERVICE_MEDIA_CHANNEL_H_
diff --git a/content/common/gpu/media/media_service.cc b/media/gpu/ipc/service/media_service.cc
similarity index 78%
rename from content/common/gpu/media/media_service.cc
rename to media/gpu/ipc/service/media_service.cc
index 1fbd51e..a5e6d219 100644
--- a/content/common/gpu/media/media_service.cc
+++ b/media/gpu/ipc/service/media_service.cc
@@ -2,20 +2,20 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/media_service.h"
+#include "media/gpu/ipc/service/media_service.h"
 
 #include <memory>
 #include <utility>
 
-#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
-#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
-#include "content/common/gpu/media/media_channel.h"
 #include "gpu/ipc/service/gpu_channel.h"
 #include "gpu/ipc/service/gpu_channel_manager.h"
 #include "ipc/ipc_message_macros.h"
 #include "ipc/param_traits_macros.h"
+#include "media/gpu/ipc/service/gpu_video_decode_accelerator.h"
+#include "media/gpu/ipc/service/gpu_video_encode_accelerator.h"
+#include "media/gpu/ipc/service/media_channel.h"
 
-namespace content {
+namespace media {
 
 MediaService::MediaService(gpu::GpuChannelManager* channel_manager)
     : channel_manager_(channel_manager) {}
@@ -38,4 +38,4 @@
   media_channels_.clear();
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/media_service.h b/media/gpu/ipc/service/media_service.h
similarity index 81%
rename from content/common/gpu/media/media_service.h
rename to media/gpu/ipc/service/media_service.h
index 0d8d6499..3ebc283 100644
--- a/content/common/gpu/media/media_service.h
+++ b/media/gpu/ipc/service/media_service.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_MEDIA_SERVICE_H_
-#define CONTENT_COMMON_GPU_MEDIA_MEDIA_SERVICE_H_
+#ifndef MEDIA_GPU_IPC_SERVICE_MEDIA_SERVICE_H_
+#define MEDIA_GPU_IPC_SERVICE_MEDIA_SERVICE_H_
 
 #include <stdint.h>
 
@@ -20,7 +20,7 @@
 class GpuChannelManager;
 }
 
-namespace content {
+namespace media {
 
 class MediaChannel;
 
@@ -40,6 +40,6 @@
   DISALLOW_COPY_AND_ASSIGN(MediaService);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_MEDIA_SERVICE_H_
+#endif  // MEDIA_GPU_IPC_SERVICE_MEDIA_SERVICE_H_
diff --git a/content/common/gpu/media/jpeg_decode_accelerator_unittest.cc b/media/gpu/jpeg_decode_accelerator_unittest.cc
similarity index 90%
rename from content/common/gpu/media/jpeg_decode_accelerator_unittest.cc
rename to media/gpu/jpeg_decode_accelerator_unittest.cc
index d872321..dc8757a 100644
--- a/content/common/gpu/media/jpeg_decode_accelerator_unittest.cc
+++ b/media/gpu/jpeg_decode_accelerator_unittest.cc
@@ -24,27 +24,27 @@
 #include "base/strings/string_split.h"
 #include "base/thread_task_runner_handle.h"
 #include "build/build_config.h"
-#include "content/common/gpu/media/video_accelerator_unittest_helpers.h"
 #include "media/base/test_data_util.h"
 #include "media/filters/jpeg_parser.h"
+#include "media/gpu/video_accelerator_unittest_helpers.h"
 #include "media/video/jpeg_decode_accelerator.h"
 #include "third_party/libyuv/include/libyuv.h"
 #include "ui/gfx/codec/jpeg_codec.h"
 
 #if defined(OS_CHROMEOS)
 #if defined(USE_V4L2_CODEC)
-#include "content/common/gpu/media/v4l2_device.h"
-#include "content/common/gpu/media/v4l2_jpeg_decode_accelerator.h"
+#include "media/gpu/v4l2_device.h"
+#include "media/gpu/v4l2_jpeg_decode_accelerator.h"
 #endif
 #if defined(ARCH_CPU_X86_FAMILY)
-#include "content/common/gpu/media/vaapi_jpeg_decode_accelerator.h"
-#include "content/common/gpu/media/vaapi_wrapper.h"
+#include "media/gpu/vaapi_jpeg_decode_accelerator.h"
+#include "media/gpu/vaapi_wrapper.h"
 #endif
 #endif
 
 using media::JpegDecodeAccelerator;
 
-namespace content {
+namespace media {
 namespace {
 
 // Default test image file.
@@ -130,11 +130,9 @@
 
 JpegClient::JpegClient(const std::vector<TestImageFile*>& test_image_files,
                        ClientStateNotification<ClientState>* note)
-    : test_image_files_(test_image_files), state_(CS_CREATED), note_(note) {
-}
+    : test_image_files_(test_image_files), state_(CS_CREATED), note_(note) {}
 
-JpegClient::~JpegClient() {
-}
+JpegClient::~JpegClient() {}
 
 void JpegClient::CreateJpegDecoder() {
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
@@ -179,7 +177,8 @@
     SetState(CS_DECODE_PASS);
   } else {
     LOG(ERROR) << "The mean absolute difference between software and hardware "
-                  "decode is " << difference;
+                  "decode is "
+               << difference;
     SetState(CS_ERROR);
   }
 }
@@ -256,15 +255,10 @@
                                           image_file->data_str.size());
   scoped_refptr<media::VideoFrame> out_frame_ =
       media::VideoFrame::WrapExternalSharedMemory(
-          media::PIXEL_FORMAT_I420,
-          image_file->visible_size,
-          gfx::Rect(image_file->visible_size),
-          image_file->visible_size,
-          static_cast<uint8_t*>(hw_out_shm_->memory()),
-          image_file->output_size,
-          hw_out_shm_->handle(),
-          0,
-          base::TimeDelta());
+          media::PIXEL_FORMAT_I420, image_file->visible_size,
+          gfx::Rect(image_file->visible_size), image_file->visible_size,
+          static_cast<uint8_t*>(hw_out_shm_->memory()), image_file->output_size,
+          hw_out_shm_->handle(), 0, base::TimeDelta());
   LOG_ASSERT(out_frame_.get());
   decoder_->Decode(bitstream_buffer, out_frame_);
 }
@@ -277,30 +271,22 @@
   uint8_t* uplane =
       yplane +
       media::VideoFrame::PlaneSize(format, media::VideoFrame::kYPlane,
-                                   image_file->visible_size).GetArea();
+                                   image_file->visible_size)
+          .GetArea();
   uint8_t* vplane =
       uplane +
       media::VideoFrame::PlaneSize(format, media::VideoFrame::kUPlane,
-                                   image_file->visible_size).GetArea();
+                                   image_file->visible_size)
+          .GetArea();
   int yplane_stride = image_file->visible_size.width();
   int uv_plane_stride = yplane_stride / 2;
 
   if (libyuv::ConvertToI420(
-          static_cast<uint8_t*>(in_shm_->memory()),
-          image_file->data_str.size(),
-          yplane,
-          yplane_stride,
-          uplane,
-          uv_plane_stride,
-          vplane,
-          uv_plane_stride,
-          0,
-          0,
-          image_file->visible_size.width(),
-          image_file->visible_size.height(),
-          image_file->visible_size.width(),
-          image_file->visible_size.height(),
-          libyuv::kRotate0,
+          static_cast<uint8_t*>(in_shm_->memory()), image_file->data_str.size(),
+          yplane, yplane_stride, uplane, uv_plane_stride, vplane,
+          uv_plane_stride, 0, 0, image_file->visible_size.width(),
+          image_file->visible_size.height(), image_file->visible_size.width(),
+          image_file->visible_size.height(), libyuv::kRotate0,
           libyuv::FOURCC_MJPG) != 0) {
     LOG(ERROR) << "Software decode " << image_file->filename << " failed.";
     return false;
@@ -313,7 +299,7 @@
   JpegDecodeAcceleratorTestEnvironment(
       const base::FilePath::CharType* jpeg_filenames) {
     user_jpeg_filenames_ =
-        jpeg_filenames ? jpeg_filenames: kDefaultJpegFilename;
+        jpeg_filenames ? jpeg_filenames : kDefaultJpegFilename;
   }
   void SetUp() override;
   void TearDown() override;
@@ -421,13 +407,13 @@
 }
 
 void JpegDecodeAcceleratorTestEnvironment::ReadTestJpegImage(
-    base::FilePath& input_file, TestImageFile* image_data) {
+    base::FilePath& input_file,
+    TestImageFile* image_data) {
   ASSERT_TRUE(base::ReadFileToString(input_file, &image_data->data_str));
 
   ASSERT_TRUE(media::ParseJpegPicture(
       reinterpret_cast<const uint8_t*>(image_data->data_str.data()),
-                                       image_data->data_str.size(),
-      &image_data->parse_result));
+      image_data->data_str.size(), &image_data->parse_result));
   image_data->visible_size.SetSize(
       image_data->parse_result.frame_header.visible_width,
       image_data->parse_result.frame_header.visible_height);
@@ -471,8 +457,7 @@
     for (size_t i = 0; i < num_concurrent_decoders; i++) {
       decoder_thread.task_runner()->PostTask(
           FROM_HERE, base::Bind(&JpegClient::StartDecode,
-                                base::Unretained(clients[i]),
-                                index));
+                                base::Unretained(clients[i]), index));
     }
     for (size_t i = 0; i < num_concurrent_decoders; i++) {
       ASSERT_EQ(notes[i]->Wait(), expected_status_[index]);
@@ -544,7 +529,7 @@
 }
 
 }  // namespace
-}  // namespace content
+}  // namespace media
 
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
@@ -569,7 +554,7 @@
       continue;
     }
     if (it->first == "save_to_file") {
-      content::g_save_to_file = true;
+      media::g_save_to_file = true;
       continue;
     }
     if (it->first == "v" || it->first == "vmodule")
@@ -577,14 +562,12 @@
     LOG(FATAL) << "Unexpected switch: " << it->first << ":" << it->second;
   }
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
-  content::VaapiWrapper::PreSandboxInitialization();
+  media::VaapiWrapper::PreSandboxInitialization();
 #endif
 
-  content::g_env =
-      reinterpret_cast<content::JpegDecodeAcceleratorTestEnvironment*>(
-          testing::AddGlobalTestEnvironment(
-              new content::JpegDecodeAcceleratorTestEnvironment(
-                  jpeg_filenames)));
+  media::g_env = reinterpret_cast<media::JpegDecodeAcceleratorTestEnvironment*>(
+      testing::AddGlobalTestEnvironment(
+          new media::JpegDecodeAcceleratorTestEnvironment(jpeg_filenames)));
 
   return RUN_ALL_TESTS();
 }
diff --git a/media/gpu/media_gpu_export.h b/media/gpu/media_gpu_export.h
new file mode 100644
index 0000000..b5b80aa
--- /dev/null
+++ b/media/gpu/media_gpu_export.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_MEDIA_GPU_EXPORT_H_
+#define MEDIA_GPU_MEDIA_GPU_EXPORT_H_
+
+// Define MEDIA_GPU_EXPORT so that functionality implemented by the Media GPU
+// module can be exported to consumers.
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(MEDIA_GPU_IMPLEMENTATION)
+#define MEDIA_GPU_EXPORT __declspec(dllexport)
+#else
+#define MEDIA_GPU_EXPORT __declspec(dllimport)
+#endif  // defined(MEDIA_GPU_IMPLEMENTATION)
+
+#else  // defined(WIN32)
+#if defined(MEDIA_GPU_IMPLEMENTATION)
+#define MEDIA_GPU_EXPORT __attribute__((visibility("default")))
+#else
+#define MEDIA_GPU_EXPORT
+#endif
+#endif
+
+#else  // defined(COMPONENT_BUILD)
+#define MEDIA_GPU_EXPORT
+#endif
+
+#endif  // MEDIA_GPU_MEDIA_GPU_EXPORT_H_
diff --git a/content/common/gpu/media/rendering_helper.cc b/media/gpu/rendering_helper.cc
similarity index 86%
rename from content/common/gpu/media/rendering_helper.cc
rename to media/gpu/rendering_helper.cc
index cabe3fb..b72dc10 100644
--- a/content/common/gpu/media/rendering_helper.cc
+++ b/media/gpu/rendering_helper.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/rendering_helper.h"
+#include "media/gpu/rendering_helper.h"
 
 #include <string.h>
 
@@ -72,7 +72,7 @@
   CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
 }
 
-namespace content {
+namespace media {
 namespace {
 
 void WaitForSwapAck(const base::Closure& callback, gfx::SwapResult result) {
@@ -127,18 +127,16 @@
 
   void OnWindowStateChanged(ui::PlatformWindowState new_state) override {}
 
-  void OnLostCapture() override {};
+  void OnLostCapture() override{};
 
   void OnAcceleratedWidgetAvailable(gfx::AcceleratedWidget widget,
                                     float device_pixel_ratio) override {
     accelerated_widget_ = widget;
   }
 
-  void OnAcceleratedWidgetDestroyed() override {
-    NOTREACHED();
-  }
+  void OnAcceleratedWidgetDestroyed() override { NOTREACHED(); }
 
-  void OnActivationChanged(bool active) override {};
+  void OnActivationChanged(bool active) override{};
 
   gfx::AcceleratedWidget accelerated_widget() const {
     return accelerated_widget_;
@@ -158,8 +156,7 @@
 #endif  // defined(USE_OZONE)
 
 RenderingHelperParams::RenderingHelperParams()
-    : rendering_fps(0), warm_up_iterations(0), render_as_thumbnails(false) {
-}
+    : rendering_fps(0), warm_up_iterations(0), render_as_thumbnails(false) {}
 
 RenderingHelperParams::RenderingHelperParams(
     const RenderingHelperParams& other) = default;
@@ -180,14 +177,12 @@
 }
 
 RenderingHelper::RenderedVideo::RenderedVideo()
-    : is_flushing(false), frames_to_drop(0) {
-}
+    : is_flushing(false), frames_to_drop(0) {}
 
 RenderingHelper::RenderedVideo::RenderedVideo(const RenderedVideo& other) =
     default;
 
-RenderingHelper::RenderedVideo::~RenderedVideo() {
-}
+RenderingHelper::RenderedVideo::~RenderedVideo() {}
 
 // static
 void RenderingHelper::InitializeOneOff(base::WaitableEvent* done) {
@@ -216,18 +211,10 @@
 
 void RenderingHelper::Setup() {
 #if defined(OS_WIN)
-  window_ = CreateWindowEx(0,
-                           L"Static",
-                           L"VideoDecodeAcceleratorTest",
-                           WS_OVERLAPPEDWINDOW | WS_VISIBLE,
-                           0,
-                           0,
-                           GetSystemMetrics(SM_CXSCREEN),
-                           GetSystemMetrics(SM_CYSCREEN),
-                           NULL,
-                           NULL,
-                           NULL,
-                           NULL);
+  window_ = CreateWindowEx(
+      0, L"Static", L"VideoDecodeAcceleratorTest",
+      WS_OVERLAPPEDWINDOW | WS_VISIBLE, 0, 0, GetSystemMetrics(SM_CXSCREEN),
+      GetSystemMetrics(SM_CYSCREEN), NULL, NULL, NULL, NULL);
 #elif defined(USE_X11)
   Display* display = gfx::GetXDisplay();
   Screen* screen = DefaultScreenOfDisplay(display);
@@ -241,18 +228,11 @@
   window_attributes.override_redirect = true;
   int depth = DefaultDepth(display, DefaultScreen(display));
 
-  window_ = XCreateWindow(display,
-                          DefaultRootWindow(display),
-                          0,
-                          0,
-                          XWidthOfScreen(screen),
-                          XHeightOfScreen(screen),
-                          0 /* border width */,
-                          depth,
-                          CopyFromParent /* class */,
-                          CopyFromParent /* visual */,
-                          (CWBackPixel | CWOverrideRedirect),
-                          &window_attributes);
+  window_ = XCreateWindow(
+      display, DefaultRootWindow(display), 0, 0, XWidthOfScreen(screen),
+      XHeightOfScreen(screen), 0 /* border width */, depth,
+      CopyFromParent /* class */, CopyFromParent /* visual */,
+      (CWBackPixel | CWOverrideRedirect), &window_attributes);
   XStoreName(display, window_, "VideoDecodeAcceleratorTest");
   XSelectInput(display, window_, ExposureMask);
   XMapWindow(display, window_);
@@ -277,8 +257,7 @@
   display_configurator_->SetDelegateForTesting(0);
   display_configurator_->AddObserver(&display_setup_observer);
   display_configurator_->Init(
-      ui::OzonePlatform::GetInstance()->CreateNativeDisplayDelegate(),
-      true);
+      ui::OzonePlatform::GetInstance()->CreateNativeDisplayDelegate(), true);
   display_configurator_->ForceInitialConfigure(0);
   // Make sure all the display configuration is applied.
   wait_display_setup.Run();
@@ -356,8 +335,8 @@
 #endif  // defined(USE_OZONE)
   screen_size_ = gl_surface_->GetSize();
 
-  gl_context_ = gfx::GLContext::CreateGLContext(
-      NULL, gl_surface_.get(), gfx::PreferIntegratedGpu);
+  gl_context_ = gfx::GLContext::CreateGLContext(NULL, gl_surface_.get(),
+                                                gfx::PreferIntegratedGpu);
   CHECK(gl_context_->MakeCurrent(gl_surface_.get()));
 
   CHECK_GT(params.window_sizes.size(), 0U);
@@ -378,15 +357,9 @@
     glGenFramebuffersEXT(1, &thumbnails_fbo_id_);
     glGenTextures(1, &thumbnails_texture_id_);
     glBindTexture(GL_TEXTURE_2D, thumbnails_texture_id_);
-    glTexImage2D(GL_TEXTURE_2D,
-                 0,
-                 GL_RGB,
-                 thumbnails_fbo_size_.width(),
-                 thumbnails_fbo_size_.height(),
-                 0,
-                 GL_RGB,
-                 GL_UNSIGNED_SHORT_5_6_5,
-                 NULL);
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, thumbnails_fbo_size_.width(),
+                 thumbnails_fbo_size_.height(), 0, GL_RGB,
+                 GL_UNSIGNED_SHORT_5_6_5, NULL);
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
@@ -394,11 +367,8 @@
     glBindTexture(GL_TEXTURE_2D, 0);
 
     glBindFramebufferEXT(GL_FRAMEBUFFER, thumbnails_fbo_id_);
-    glFramebufferTexture2DEXT(GL_FRAMEBUFFER,
-                              GL_COLOR_ATTACHMENT0,
-                              GL_TEXTURE_2D,
-                              thumbnails_texture_id_,
-                              0);
+    glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+                              GL_TEXTURE_2D, thumbnails_texture_id_, 0);
 
     GLenum fb_status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
     CHECK(fb_status == GL_FRAMEBUFFER_COMPLETE) << fb_status;
@@ -414,21 +384,21 @@
   // in the vertex shader for this to be rendered the right way up.
   // In the case of thumbnail rendering we use the same vertex shader
   // to render the FBO the screen, where we do not want this flipping.
-  static const float kVertices[] =
-      { -1.f, 1.f, -1.f, -1.f, 1.f, 1.f, 1.f, -1.f, };
-  static const float kTextureCoords[] = { 0, 1, 0, 0, 1, 1, 1, 0, };
-  static const char kVertexShader[] = STRINGIZE(
-      varying vec2 interp_tc;
-      attribute vec4 in_pos;
-      attribute vec2 in_tc;
-      uniform bool tex_flip;
-      void main() {
-        if (tex_flip)
-          interp_tc = vec2(in_tc.x, 1.0 - in_tc.y);
-        else
-          interp_tc = in_tc;
-        gl_Position = in_pos;
-      });
+  static const float kVertices[] = {
+      -1.f, 1.f, -1.f, -1.f, 1.f, 1.f, 1.f, -1.f,
+  };
+  static const float kTextureCoords[] = {
+      0, 1, 0, 0, 1, 1, 1, 0,
+  };
+  static const char kVertexShader[] =
+      STRINGIZE(varying vec2 interp_tc; attribute vec4 in_pos;
+                attribute vec2 in_tc; uniform bool tex_flip; void main() {
+                  if (tex_flip)
+                    interp_tc = vec2(in_tc.x, 1.0 - in_tc.y);
+                  else
+                    interp_tc = in_tc;
+                  gl_Position = in_pos;
+                });
 
 #if GL_VARIANT_EGL
   static const char kFragmentShader[] =
@@ -447,19 +417,14 @@
       "  gl_FragColor = color;\n"
       "}\n";
 #else
-  static const char kFragmentShader[] = STRINGIZE(
-      varying vec2 interp_tc;
-      uniform sampler2D tex;
-      void main() {
-        gl_FragColor = texture2D(tex, interp_tc);
-      });
+  static const char kFragmentShader[] =
+      STRINGIZE(varying vec2 interp_tc; uniform sampler2D tex;
+                void main() { gl_FragColor = texture2D(tex, interp_tc); });
 #endif
   program_ = glCreateProgram();
-  CreateShader(
-      program_, GL_VERTEX_SHADER, kVertexShader, arraysize(kVertexShader));
-  CreateShader(program_,
-               GL_FRAGMENT_SHADER,
-               kFragmentShader,
+  CreateShader(program_, GL_VERTEX_SHADER, kVertexShader,
+               arraysize(kVertexShader));
+  CreateShader(program_, GL_FRAGMENT_SHADER, kFragmentShader,
                arraysize(kFragmentShader));
   glLinkProgram(program_);
   int result = GL_FALSE;
@@ -525,14 +490,8 @@
       new GLubyte[screen_size_.GetArea() * 2]());
   glGenTextures(1, &texture_id);
   glBindTexture(GL_TEXTURE_2D, texture_id);
-  glTexImage2D(GL_TEXTURE_2D,
-               0,
-               GL_RGB,
-               screen_size_.width(),
-               screen_size_.height(),
-               0,
-               GL_RGB,
-               GL_UNSIGNED_SHORT_5_6_5,
+  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, screen_size_.width(),
+               screen_size_.height(), 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5,
                emptyData.get());
   for (int i = 0; i < warm_up_iterations; ++i) {
     RenderTexture(GL_TEXTURE_2D, texture_id);
@@ -572,27 +531,17 @@
                                     const gfx::Size& size,
                                     base::WaitableEvent* done) {
   if (base::MessageLoop::current() != message_loop_) {
-    message_loop_->PostTask(FROM_HERE,
-                            base::Bind(&RenderingHelper::CreateTexture,
-                                       base::Unretained(this),
-                                       texture_target,
-                                       texture_id,
-                                       size,
-                                       done));
+    message_loop_->PostTask(
+        FROM_HERE,
+        base::Bind(&RenderingHelper::CreateTexture, base::Unretained(this),
+                   texture_target, texture_id, size, done));
     return;
   }
   glGenTextures(1, texture_id);
   glBindTexture(texture_target, *texture_id);
   if (texture_target == GL_TEXTURE_2D) {
-    glTexImage2D(GL_TEXTURE_2D,
-                 0,
-                 GL_RGBA,
-                 size.width(),
-                 size.height(),
-                 0,
-                 GL_RGBA,
-                 GL_UNSIGNED_BYTE,
-                 NULL);
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
+                 GL_RGBA, GL_UNSIGNED_BYTE, NULL);
   }
   glTexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
   glTexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
@@ -707,12 +656,8 @@
   glBindFramebufferEXT(GL_FRAMEBUFFER, thumbnails_fbo_id_);
   glPixelStorei(GL_PACK_ALIGNMENT, 1);
   // We can only count on GL_RGBA/GL_UNSIGNED_BYTE support.
-  glReadPixels(0,
-               0,
-               thumbnails_fbo_size_.width(),
-               thumbnails_fbo_size_.height(),
-               GL_RGBA,
-               GL_UNSIGNED_BYTE,
+  glReadPixels(0, 0, thumbnails_fbo_size_.width(),
+               thumbnails_fbo_size_.height(), GL_RGBA, GL_UNSIGNED_BYTE,
                &rgba[0]);
   glBindFramebufferEXT(GL_FRAMEBUFFER,
                        gl_surface_->GetBackingFrameBufferObject());
@@ -763,7 +708,7 @@
   // Frames that will be returned to the client (via the no_longer_needed_cb)
   // after this vector falls out of scope at the end of this method. We need
   // to keep references to them until after SwapBuffers() call below.
-  std::vector<scoped_refptr<VideoFrameTexture> > frames_to_be_returned;
+  std::vector<scoped_refptr<VideoFrameTexture>> frames_to_be_returned;
   bool need_swap_buffer = false;
   if (render_as_thumbnails_) {
     // In render_as_thumbnails_ mode, we render the FBO content on the
@@ -796,8 +741,7 @@
     return;
   }
 
-  gl_surface_->SwapBuffersAsync(
-          base::Bind(&WaitForSwapAck, schedule_frame));
+  gl_surface_->SwapBuffersAsync(base::Bind(&WaitForSwapAck, schedule_frame));
 }
 
 // Helper function for the LayoutRenderingAreas(). The |lengths| are the
@@ -899,7 +843,7 @@
     DropOneFrameForAllVideos();
   }
 
-  message_loop_->PostDelayedTask(
-      FROM_HERE, render_task_.callback(), target - now);
+  message_loop_->PostDelayedTask(FROM_HERE, render_task_.callback(),
+                                 target - now);
 }
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/rendering_helper.h b/media/gpu/rendering_helper.h
similarity index 95%
rename from content/common/gpu/media/rendering_helper.h
rename to media/gpu/rendering_helper.h
index 1fc592e..029d5a4 100644
--- a/content/common/gpu/media/rendering_helper.h
+++ b/media/gpu/rendering_helper.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_RENDERING_HELPER_H_
-#define CONTENT_COMMON_GPU_MEDIA_RENDERING_HELPER_H_
+#ifndef MEDIA_GPU_RENDERING_HELPER_H_
+#define MEDIA_GPU_RENDERING_HELPER_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -32,7 +32,7 @@
 class DisplayConfigurator;
 }
 
-namespace content {
+namespace media {
 
 class VideoFrameTexture : public base::RefCounted<VideoFrameTexture> {
  public:
@@ -86,7 +86,6 @@
 // (except for ctor/dtor) ensure they're being run on a single thread.
 class RenderingHelper {
  public:
-
   RenderingHelper();
   ~RenderingHelper();
 
@@ -161,7 +160,7 @@
     int frames_to_drop;
 
     // The video frames pending for rendering.
-    std::queue<scoped_refptr<VideoFrameTexture> > pending_frames;
+    std::queue<scoped_refptr<VideoFrameTexture>> pending_frames;
 
     RenderedVideo();
     RenderedVideo(const RenderedVideo& other);
@@ -225,6 +224,6 @@
   DISALLOW_COPY_AND_ASSIGN(RenderingHelper);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_RENDERING_HELPER_H_
+#endif  // MEDIA_GPU_RENDERING_HELPER_H_
diff --git a/content/common/gpu/media/shared_memory_region.cc b/media/gpu/shared_memory_region.cc
similarity index 92%
rename from content/common/gpu/media/shared_memory_region.cc
rename to media/gpu/shared_memory_region.cc
index 4ee6a24..13df2041a 100644
--- a/content/common/gpu/media/shared_memory_region.cc
+++ b/media/gpu/shared_memory_region.cc
@@ -3,9 +3,9 @@
 // found in the LICENSE file.
 
 #include "base/sys_info.h"
-#include "content/common/gpu/media/shared_memory_region.h"
+#include "media/gpu/shared_memory_region.h"
 
-namespace content {
+namespace media {
 
 SharedMemoryRegion::SharedMemoryRegion(const base::SharedMemoryHandle& handle,
                                        off_t offset,
@@ -39,4 +39,4 @@
   return addr ? addr + alignment_size_ : nullptr;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/shared_memory_region.h b/media/gpu/shared_memory_region.h
similarity index 89%
rename from content/common/gpu/media/shared_memory_region.h
rename to media/gpu/shared_memory_region.h
index f7c5db29..2ca11d7 100644
--- a/content/common/gpu/media/shared_memory_region.h
+++ b/media/gpu/shared_memory_region.h
@@ -2,13 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_SHARED_MEMORY_REGION_H_
-#define CONTENT_COMMON_GPU_MEDIA_SHARED_MEMORY_REGION_H_
+#ifndef MEDIA_GPU_SHARED_MEMORY_REGION_H_
+#define MEDIA_GPU_SHARED_MEMORY_REGION_H_
 
 #include "base/memory/shared_memory.h"
 #include "media/base/bitstream_buffer.h"
 
-namespace content {
+namespace media {
 
 // Helper class to access a region of a SharedMemory. Different from
 // SharedMemory, in which the |offset| of function MapAt() must be aligned to
@@ -52,6 +52,6 @@
   DISALLOW_COPY_AND_ASSIGN(SharedMemoryRegion);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_SHARED_MEMORY_REGION_H_
+#endif  // MEDIA_GPU_SHARED_MEMORY_REGION_H_
diff --git a/content/common/gpu/media/tegra_v4l2_device.cc b/media/gpu/tegra_v4l2_device.cc
similarity index 94%
rename from content/common/gpu/media/tegra_v4l2_device.cc
rename to media/gpu/tegra_v4l2_device.cc
index 6efe16f..25b1c6a 100644
--- a/content/common/gpu/media/tegra_v4l2_device.cc
+++ b/media/gpu/tegra_v4l2_device.cc
@@ -9,10 +9,10 @@
 #include "base/lazy_instance.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/trace_event/trace_event.h"
-#include "content/common/gpu/media/tegra_v4l2_device.h"
+#include "media/gpu/tegra_v4l2_device.h"
 #include "ui/gl/gl_bindings.h"
 
-namespace content {
+namespace media {
 
 namespace {
 const char kDecoderDevice[] = "/dev/tegra_avpchannel";
@@ -93,9 +93,7 @@
     LAZY_INSTANCE_INITIALIZER;
 
 TegraV4L2Device::TegraV4L2Device(Type type)
-    : V4L2Device(type),
-      device_fd_(-1) {
-}
+    : V4L2Device(type), device_fd_(-1) {}
 
 TegraV4L2Device::~TegraV4L2Device() {
   if (device_fd_ != -1) {
@@ -205,11 +203,8 @@
 
   EGLint attr = EGL_NONE;
   EGLImageKHR egl_image =
-      eglCreateImageKHR(egl_display,
-                        egl_context,
-                        EGL_GL_TEXTURE_2D_KHR,
-                        reinterpret_cast<EGLClientBuffer>(texture_id),
-                        &attr);
+      eglCreateImageKHR(egl_display, egl_context, EGL_GL_TEXTURE_2D_KHR,
+                        reinterpret_cast<EGLClientBuffer>(texture_id), &attr);
   if (egl_image == EGL_NO_IMAGE_KHR) {
     LOG(ERROR) << "Unable to create EGL image";
     return egl_image;
@@ -227,7 +222,9 @@
   return eglDestroyImageKHR(egl_display, egl_image);
 }
 
-GLenum TegraV4L2Device::GetTextureTarget() { return GL_TEXTURE_2D; }
+GLenum TegraV4L2Device::GetTextureTarget() {
+  return GL_TEXTURE_2D;
+}
 
 uint32_t TegraV4L2Device::PreferredInputFormat() {
   // TODO(posciak): We should support "dontcare" returns here once we
@@ -236,4 +233,4 @@
   return V4L2_PIX_FMT_YUV420M;
 }
 
-}  //  namespace content
+}  //  namespace media
diff --git a/content/common/gpu/media/tegra_v4l2_device.h b/media/gpu/tegra_v4l2_device.h
similarity index 87%
rename from content/common/gpu/media/tegra_v4l2_device.h
rename to media/gpu/tegra_v4l2_device.h
index 30376db..eac12947 100644
--- a/content/common/gpu/media/tegra_v4l2_device.h
+++ b/media/gpu/tegra_v4l2_device.h
@@ -5,17 +5,17 @@
 // This file contains the implementation of TegraV4L2Device used on
 // Tegra platform.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_TEGRA_V4L2_DEVICE_H_
-#define CONTENT_COMMON_GPU_MEDIA_TEGRA_V4L2_DEVICE_H_
+#ifndef MEDIA_GPU_TEGRA_V4L2_DEVICE_H_
+#define MEDIA_GPU_TEGRA_V4L2_DEVICE_H_
 
 #include <stddef.h>
 #include <stdint.h>
 
 #include "base/macros.h"
-#include "content/common/gpu/media/v4l2_device.h"
+#include "media/gpu/v4l2_device.h"
 #include "ui/gl/gl_bindings.h"
 
-namespace content {
+namespace media {
 
 // This class implements the V4L2Device interface for Tegra platform.
 // It interfaces with libtegrav4l2 library which provides API that exhibit the
@@ -62,6 +62,6 @@
   DISALLOW_COPY_AND_ASSIGN(TegraV4L2Device);
 };
 
-}  //  namespace content
+}  //  namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_TEGRA_V4L2_DEVICE_H_
+#endif  // MEDIA_GPU_TEGRA_V4L2_DEVICE_H_
diff --git a/content/common/gpu/media/v4l2.sig b/media/gpu/v4l2.sig
similarity index 100%
rename from content/common/gpu/media/v4l2.sig
rename to media/gpu/v4l2.sig
diff --git a/content/common/gpu/media/v4l2_device.cc b/media/gpu/v4l2_device.cc
similarity index 94%
rename from content/common/gpu/media/v4l2_device.cc
rename to media/gpu/v4l2_device.cc
index bd050a4..d80989d 100644
--- a/content/common/gpu/media/v4l2_device.cc
+++ b/media/gpu/v4l2_device.cc
@@ -8,18 +8,16 @@
 
 #include "base/numerics/safe_conversions.h"
 #include "build/build_config.h"
-#include "content/common/gpu/media/generic_v4l2_device.h"
+#include "media/gpu/generic_v4l2_device.h"
 #if defined(ARCH_CPU_ARMEL)
-#include "content/common/gpu/media/tegra_v4l2_device.h"
+#include "media/gpu/tegra_v4l2_device.h"
 #endif
 
-namespace content {
+namespace media {
 
-V4L2Device::V4L2Device(Type type) : type_(type) {
-}
+V4L2Device::V4L2Device(Type type) : type_(type) {}
 
-V4L2Device::~V4L2Device() {
-}
+V4L2Device::~V4L2Device() {}
 
 // static
 scoped_refptr<V4L2Device> V4L2Device::Create(Type type) {
@@ -86,8 +84,7 @@
 uint32_t V4L2Device::VideoCodecProfileToV4L2PixFmt(
     media::VideoCodecProfile profile,
     bool slice_based) {
-  if (profile >= media::H264PROFILE_MIN &&
-      profile <= media::H264PROFILE_MAX) {
+  if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) {
     if (slice_based)
       return V4L2_PIX_FMT_H264_SLICE;
     else
@@ -245,14 +242,14 @@
   if (max_resolution->IsEmpty()) {
     max_resolution->SetSize(1920, 1088);
     LOG(ERROR) << "GetSupportedResolution failed to get maximum resolution for "
-               << "fourcc " << std::hex << pixelformat
-               << ", fall back to " << max_resolution->ToString();
+               << "fourcc " << std::hex << pixelformat << ", fall back to "
+               << max_resolution->ToString();
   }
   if (min_resolution->IsEmpty()) {
     min_resolution->SetSize(16, 16);
     LOG(ERROR) << "GetSupportedResolution failed to get minimum resolution for "
-               << "fourcc " << std::hex << pixelformat
-               << ", fall back to " << min_resolution->ToString();
+               << "fourcc " << std::hex << pixelformat << ", fall back to "
+               << min_resolution->ToString();
   }
 }
 
@@ -321,4 +318,4 @@
   return iter != supported_profiles.end();
 }
 
-}  //  namespace content
+}  //  namespace media
diff --git a/content/common/gpu/media/v4l2_device.h b/media/gpu/v4l2_device.h
similarity index 93%
rename from content/common/gpu/media/v4l2_device.h
rename to media/gpu/v4l2_device.h
index 10905f6..1c765e95 100644
--- a/content/common/gpu/media/v4l2_device.h
+++ b/media/gpu/v4l2_device.h
@@ -6,8 +6,8 @@
 // V4L2DecodeAccelerator class to delegate/pass the device specific
 // handling of any of the functionalities.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_V4L2_DEVICE_H_
-#define CONTENT_COMMON_GPU_MEDIA_V4L2_DEVICE_H_
+#ifndef MEDIA_GPU_V4L2_DEVICE_H_
+#define MEDIA_GPU_V4L2_DEVICE_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -16,9 +16,9 @@
 
 #include "base/files/scoped_file.h"
 #include "base/memory/ref_counted.h"
-#include "content/common/content_export.h"
 #include "media/base/video_decoder_config.h"
 #include "media/base/video_frame.h"
+#include "media/gpu/media_gpu_export.h"
 #include "media/video/video_decode_accelerator.h"
 #include "ui/gfx/geometry/size.h"
 #include "ui/gl/gl_bindings.h"
@@ -29,9 +29,9 @@
 #define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F')
 #define V4L2_PIX_FMT_MT21 v4l2_fourcc('M', 'T', '2', '1')
 
-namespace content {
+namespace media {
 
-class CONTENT_EXPORT V4L2Device
+class MEDIA_GPU_EXPORT V4L2Device
     : public base::RefCountedThreadSafe<V4L2Device> {
  public:
   // Utility format conversion functions
@@ -131,13 +131,15 @@
 
   // Get minimum and maximum resolution for fourcc |pixelformat| and store to
   // |min_resolution| and |max_resolution|.
-  void GetSupportedResolution(uint32_t pixelformat, gfx::Size* min_resolution,
+  void GetSupportedResolution(uint32_t pixelformat,
+                              gfx::Size* min_resolution,
                               gfx::Size* max_resolution);
 
   // Return supported profiles for decoder, including only profiles for given
   // fourcc |pixelformats|.
   media::VideoDecodeAccelerator::SupportedProfiles GetSupportedDecodeProfiles(
-      const size_t num_formats, const uint32_t pixelformats[]);
+      const size_t num_formats,
+      const uint32_t pixelformats[]);
 
   // Return true if the device supports |profile|, taking into account only
   // fourccs from the given array of |pixelformats| of size |num_formats|.
@@ -154,6 +156,6 @@
   const Type type_;
 };
 
-}  //  namespace content
+}  //  namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_V4L2_DEVICE_H_
+#endif  // MEDIA_GPU_V4L2_DEVICE_H_
diff --git a/content/common/gpu/media/v4l2_image_processor.cc b/media/gpu/v4l2_image_processor.cc
similarity index 94%
rename from content/common/gpu/media/v4l2_image_processor.cc
rename to media/gpu/v4l2_image_processor.cc
index 60908090..f1815d8 100644
--- a/content/common/gpu/media/v4l2_image_processor.cc
+++ b/media/gpu/v4l2_image_processor.cc
@@ -16,14 +16,14 @@
 #include "base/callback.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/thread_task_runner_handle.h"
-#include "content/common/gpu/media/v4l2_image_processor.h"
+#include "media/gpu/v4l2_image_processor.h"
 
-#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str)        \
-  do {                                                                 \
-    if (device_->Ioctl(type, arg) != 0) {                              \
-      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << type_str;   \
-      return value;                                                    \
-    }                                                                  \
+#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str)      \
+  do {                                                               \
+    if (device_->Ioctl(type, arg) != 0) {                            \
+      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << type_str; \
+      return value;                                                  \
+    }                                                                \
   } while (0)
 
 #define IOCTL_OR_ERROR_RETURN(type, arg) \
@@ -32,29 +32,25 @@
 #define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
   IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
 
-#define IOCTL_OR_LOG_ERROR(type, arg)                              \
-  do {                                                             \
-    if (device_->Ioctl(type, arg) != 0)                            \
-      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << #type;  \
+#define IOCTL_OR_LOG_ERROR(type, arg)                             \
+  do {                                                            \
+    if (device_->Ioctl(type, arg) != 0)                           \
+      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << #type; \
   } while (0)
 
-namespace content {
+namespace media {
 
-V4L2ImageProcessor::InputRecord::InputRecord() : at_device(false) {
-}
+V4L2ImageProcessor::InputRecord::InputRecord() : at_device(false) {}
 
-V4L2ImageProcessor::InputRecord::~InputRecord() {
-}
+V4L2ImageProcessor::InputRecord::~InputRecord() {}
 
 V4L2ImageProcessor::OutputRecord::OutputRecord() : at_device(false) {}
 
-V4L2ImageProcessor::OutputRecord::~OutputRecord() {
-}
+V4L2ImageProcessor::OutputRecord::~OutputRecord() {}
 
 V4L2ImageProcessor::JobRecord::JobRecord() : output_buffer_index(-1) {}
 
-V4L2ImageProcessor::JobRecord::~JobRecord() {
-}
+V4L2ImageProcessor::JobRecord::~JobRecord() {}
 
 V4L2ImageProcessor::V4L2ImageProcessor(const scoped_refptr<V4L2Device>& device)
     : input_format_(media::PIXEL_FORMAT_UNKNOWN),
@@ -140,7 +136,8 @@
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
   if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
     LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP: "
-                  "caps check failed: 0x" << std::hex << caps.capabilities;
+                  "caps check failed: 0x"
+               << std::hex << caps.capabilities;
     return false;
   }
 
@@ -233,10 +230,8 @@
   job_record->ready_cb = cb;
 
   device_thread_.message_loop()->PostTask(
-      FROM_HERE,
-      base::Bind(&V4L2ImageProcessor::ProcessTask,
-                 base::Unretained(this),
-                 base::Passed(&job_record)));
+      FROM_HERE, base::Bind(&V4L2ImageProcessor::ProcessTask,
+                            base::Unretained(this), base::Passed(&job_record)));
 }
 
 void V4L2ImageProcessor::ProcessTask(std::unique_ptr<JobRecord> job_record) {
@@ -314,8 +309,8 @@
   input_planes_count_ = format.fmt.pix_mp.num_planes;
   DCHECK_LE(input_planes_count_, static_cast<size_t>(VIDEO_MAX_PLANES));
   input_allocated_size_ = V4L2Device::CodedSizeFromV4L2Format(format);
-  DCHECK(gfx::Rect(input_allocated_size_).Contains(
-      gfx::Rect(input_visible_size_)));
+  DCHECK(gfx::Rect(input_allocated_size_)
+             .Contains(gfx::Rect(input_visible_size_)));
 
   struct v4l2_crop crop;
   memset(&crop, 0, sizeof(crop));
@@ -364,8 +359,8 @@
   DCHECK_LE(output_planes_count_, static_cast<size_t>(VIDEO_MAX_PLANES));
   gfx::Size adjusted_allocated_size =
       V4L2Device::CodedSizeFromV4L2Format(format);
-  DCHECK(gfx::Rect(adjusted_allocated_size).Contains(
-      gfx::Rect(output_allocated_size_)));
+  DCHECK(gfx::Rect(adjusted_allocated_size)
+             .Contains(gfx::Rect(output_allocated_size_)));
   output_allocated_size_ = adjusted_allocated_size;
 
   struct v4l2_crop crop;
@@ -436,9 +431,8 @@
   // All processing should happen on ServiceDeviceTask(), since we shouldn't
   // touch encoder state from this thread.
   device_thread_.message_loop()->PostTask(
-      FROM_HERE,
-      base::Bind(&V4L2ImageProcessor::ServiceDeviceTask,
-                 base::Unretained(this)));
+      FROM_HERE, base::Bind(&V4L2ImageProcessor::ServiceDeviceTask,
+                            base::Unretained(this)));
 }
 
 void V4L2ImageProcessor::ServiceDeviceTask() {
@@ -461,10 +455,8 @@
       (input_buffer_queued_count_ > 0 && output_buffer_queued_count_ > 0);
 
   device_poll_thread_.message_loop()->PostTask(
-      FROM_HERE,
-      base::Bind(&V4L2ImageProcessor::DevicePollTask,
-                 base::Unretained(this),
-                 poll_device));
+      FROM_HERE, base::Bind(&V4L2ImageProcessor::DevicePollTask,
+                            base::Unretained(this), poll_device));
 
   DVLOG(2) << __func__ << ": buffer counts: INPUT[" << input_queue_.size()
            << "] => DEVICE[" << free_input_buffers_.size() << "+"
@@ -607,8 +599,10 @@
   qbuf.m.planes = qbuf_planes;
   qbuf.length = input_planes_count_;
   for (size_t i = 0; i < input_planes_count_; ++i) {
-    qbuf.m.planes[i].bytesused = media::VideoFrame::PlaneSize(
-        input_record.frame->format(), i, input_allocated_size_).GetArea();
+    qbuf.m.planes[i].bytesused =
+        media::VideoFrame::PlaneSize(input_record.frame->format(), i,
+                                     input_allocated_size_)
+            .GetArea();
     qbuf.m.planes[i].length = qbuf.m.planes[i].bytesused;
     if (input_memory_type_ == V4L2_MEMORY_USERPTR) {
       qbuf.m.planes[i].m.userptr =
@@ -664,9 +658,8 @@
   // Enqueue a poll task with no devices to poll on - will wait only for the
   // poll interrupt
   device_poll_thread_.message_loop()->PostTask(
-      FROM_HERE,
-      base::Bind(
-          &V4L2ImageProcessor::DevicePollTask, base::Unretained(this), false));
+      FROM_HERE, base::Bind(&V4L2ImageProcessor::DevicePollTask,
+                            base::Unretained(this), false));
 
   return true;
 }
@@ -726,4 +719,4 @@
   cb.Run(output_buffer_index);
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/v4l2_image_processor.h b/media/gpu/v4l2_image_processor.h
similarity index 94%
rename from content/common/gpu/media/v4l2_image_processor.h
rename to media/gpu/v4l2_image_processor.h
index 1390da4..e448a4d 100644
--- a/content/common/gpu/media/v4l2_image_processor.h
+++ b/media/gpu/v4l2_image_processor.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_V4L2_IMAGE_PROCESSOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_V4L2_IMAGE_PROCESSOR_H_
+#ifndef MEDIA_GPU_V4L2_IMAGE_PROCESSOR_H_
+#define MEDIA_GPU_V4L2_IMAGE_PROCESSOR_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -17,16 +17,16 @@
 #include "base/memory/ref_counted.h"
 #include "base/memory/weak_ptr.h"
 #include "base/threading/thread.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/v4l2_device.h"
 #include "media/base/video_frame.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/v4l2_device.h"
 
-namespace content {
+namespace media {
 
 // Handles image processing accelerators that expose a V4L2 memory-to-memory
 // interface. The threading model of this class is the same as for other V4L2
 // hardware accelerators (see V4L2VideoDecodeAccelerator) for more details.
-class CONTENT_EXPORT V4L2ImageProcessor {
+class MEDIA_GPU_EXPORT V4L2ImageProcessor {
  public:
   explicit V4L2ImageProcessor(const scoped_refptr<V4L2Device>& device);
   virtual ~V4L2ImageProcessor();
@@ -180,8 +180,8 @@
 
   // All the below members are to be accessed from device_thread_ only
   // (if it's running).
-  std::queue<linked_ptr<JobRecord> > input_queue_;
-  std::queue<linked_ptr<JobRecord> > running_jobs_;
+  std::queue<linked_ptr<JobRecord>> input_queue_;
+  std::queue<linked_ptr<JobRecord>> running_jobs_;
 
   // Input queue state.
   bool input_streamon_;
@@ -218,6 +218,6 @@
   DISALLOW_COPY_AND_ASSIGN(V4L2ImageProcessor);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_V4L2_IMAGE_PROCESSOR_H_
+#endif  // MEDIA_GPU_V4L2_IMAGE_PROCESSOR_H_
diff --git a/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc b/media/gpu/v4l2_jpeg_decode_accelerator.cc
similarity index 87%
rename from content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc
rename to media/gpu/v4l2_jpeg_decode_accelerator.cc
index 7af2759..bf43cfd1 100644
--- a/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc
+++ b/media/gpu/v4l2_jpeg_decode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/v4l2_jpeg_decode_accelerator.h"
+#include "media/gpu/v4l2_jpeg_decode_accelerator.h"
 
 #include <errno.h>
 #include <linux/videodev2.h>
@@ -15,6 +15,7 @@
 #include "base/bind.h"
 #include "base/thread_task_runner_handle.h"
 #include "media/filters/jpeg_parser.h"
+#include "media/gpu/v4l2_jpeg_decode_accelerator.h"
 #include "third_party/libyuv/include/libyuv.h"
 
 #define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_name)      \
@@ -62,7 +63,7 @@
     *(out) = _out;                                                         \
   } while (0)
 
-namespace content {
+namespace media {
 
 // This is default huffman segment for 8-bit precision luminance and
 // chrominance. The default huffman segment is constructed with the tables from
@@ -70,49 +71,46 @@
 // typical tables. These tables are useful for many applications. Lots of
 // softwares use them as standard tables such as ffmpeg.
 const uint8_t kDefaultDhtSeg[] = {
-  0xFF, 0xC4, 0x01, 0xA2, 0x00, 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01,
-  0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02,
-  0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x01, 0x00, 0x03,
-  0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
-  0x0A, 0x0B, 0x10, 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05,
-  0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04,
-  0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22,
-  0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15,
-  0x52, 0xD1, 0xF0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17,
-  0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36,
-  0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A,
-  0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
-  0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
-  0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95,
-  0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
-  0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2,
-  0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5,
-  0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
-  0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9,
-  0xFA, 0x11, 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05,
-  0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04,
-  0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22,
-  0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33,
-  0x52, 0xF0, 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25,
-  0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36,
-  0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A,
-  0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
-  0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
-  0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94,
-  0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
-  0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA,
-  0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4,
-  0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
-  0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA
-};
+    0xFF, 0xC4, 0x01, 0xA2, 0x00, 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01,
+    0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02,
+    0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x01, 0x00, 0x03,
+    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
+    0x0A, 0x0B, 0x10, 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05,
+    0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04,
+    0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22,
+    0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15,
+    0x52, 0xD1, 0xF0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17,
+    0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36,
+    0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A,
+    0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
+    0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
+    0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95,
+    0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
+    0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2,
+    0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5,
+    0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+    0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9,
+    0xFA, 0x11, 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05,
+    0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04,
+    0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22,
+    0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33,
+    0x52, 0xF0, 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25,
+    0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36,
+    0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A,
+    0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
+    0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
+    0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94,
+    0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+    0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA,
+    0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4,
+    0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+    0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA};
 
 V4L2JpegDecodeAccelerator::BufferRecord::BufferRecord()
-    : address(nullptr), length(0), at_device(false) {
-}
+    : address(nullptr), length(0), at_device(false) {}
 
-V4L2JpegDecodeAccelerator::BufferRecord::~BufferRecord() {
-}
+V4L2JpegDecodeAccelerator::BufferRecord::~BufferRecord() {}
 
 V4L2JpegDecodeAccelerator::JobRecord::JobRecord(
     const media::BitstreamBuffer& bitstream_buffer,
@@ -121,8 +119,7 @@
       shm(bitstream_buffer, true),
       out_frame(video_frame) {}
 
-V4L2JpegDecodeAccelerator::JobRecord::~JobRecord() {
-}
+V4L2JpegDecodeAccelerator::JobRecord::~JobRecord() {}
 
 V4L2JpegDecodeAccelerator::V4L2JpegDecodeAccelerator(
     const scoped_refptr<V4L2Device>& device,
@@ -180,13 +177,11 @@
   client_->NotifyError(bitstream_buffer_id, error);
 }
 
-void V4L2JpegDecodeAccelerator::PostNotifyError(
-    int32_t bitstream_buffer_id,
-    Error error) {
+void V4L2JpegDecodeAccelerator::PostNotifyError(int32_t bitstream_buffer_id,
+                                                Error error) {
   child_task_runner_->PostTask(
-      FROM_HERE,
-      base::Bind(&V4L2JpegDecodeAccelerator::NotifyError, weak_ptr_,
-                 bitstream_buffer_id, error));
+      FROM_HERE, base::Bind(&V4L2JpegDecodeAccelerator::NotifyError, weak_ptr_,
+                            bitstream_buffer_id, error));
 }
 
 bool V4L2JpegDecodeAccelerator::Initialize(Client* client) {
@@ -527,8 +522,8 @@
 
   if (device_->Ioctl(VIDIOC_DQEVENT, &ev) == 0) {
     if (ev.type == V4L2_EVENT_SOURCE_CHANGE) {
-      DVLOG(3) << __func__ << ": got source change event: "
-               << ev.u.src_change.changes;
+      DVLOG(3) << __func__
+               << ": got source change event: " << ev.u.src_change.changes;
       if (ev.u.src_change.changes &
           (V4L2_EVENT_SRC_CH_RESOLUTION | V4L2_EVENT_SRC_CH_PIXELFORMAT)) {
         return true;
@@ -558,8 +553,10 @@
     return;
 
   if (event_pending) {
-    if (!DequeueSourceChangeEvent()) return;
-    if (!RecreateOutputBuffers()) return;
+    if (!DequeueSourceChangeEvent())
+      return;
+    if (!RecreateOutputBuffers())
+      return;
   }
 
   EnqueueInput();
@@ -571,12 +568,10 @@
                               base::Unretained(this)));
   }
 
-  DVLOG(2) << __func__ << ": buffer counts: INPUT["
-           << input_jobs_.size() << "] => DEVICE["
-           << free_input_buffers_.size() << "/"
-           << input_buffer_map_.size() << "->"
-           << free_output_buffers_.size() << "/"
-           << output_buffer_map_.size() << "]";
+  DVLOG(2) << __func__ << ": buffer counts: INPUT[" << input_jobs_.size()
+           << "] => DEVICE[" << free_input_buffers_.size() << "/"
+           << input_buffer_map_.size() << "->" << free_output_buffers_.size()
+           << "/" << output_buffer_map_.size() << "]";
 }
 
 void V4L2JpegDecodeAccelerator::EnqueueInput() {
@@ -603,7 +598,7 @@
   // Output record can be enqueued because the output coded sizes of the frames
   // currently in the pipeline are all the same.
   while (running_jobs_.size() > OutputBufferQueuedCount() &&
-      !free_output_buffers_.empty()) {
+         !free_output_buffers_.empty()) {
     if (!EnqueueOutputRecord())
       return;
   }
@@ -631,18 +626,12 @@
   size_t dst_v_stride = dst_frame->stride(media::VideoFrame::kVPlane);
 
   // If the source format is I420, ConvertToI420 will simply copy the frame.
-  if (libyuv::ConvertToI420(static_cast<uint8_t*>(const_cast<void*>(src_addr)),
-                            src_size,
-                            dst_y, dst_y_stride,
-                            dst_u, dst_u_stride,
-                            dst_v, dst_v_stride,
-                            0, 0,
-                            src_coded_size.width(),
-                            src_coded_size.height(),
-                            dst_frame->coded_size().width(),
-                            dst_frame->coded_size().height(),
-                            libyuv::kRotate0,
-                            src_pixelformat)) {
+  if (libyuv::ConvertToI420(
+          static_cast<uint8_t*>(const_cast<void*>(src_addr)), src_size, dst_y,
+          dst_y_stride, dst_u, dst_u_stride, dst_v, dst_v_stride, 0, 0,
+          src_coded_size.width(), src_coded_size.height(),
+          dst_frame->coded_size().width(), dst_frame->coded_size().height(),
+          libyuv::kRotate0, src_pixelformat)) {
     LOG(ERROR) << "ConvertToI420 failed. Source format: " << src_pixelformat;
     return false;
   }
@@ -899,4 +888,4 @@
   return true;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h b/media/gpu/v4l2_jpeg_decode_accelerator.h
similarity index 91%
rename from content/common/gpu/media/v4l2_jpeg_decode_accelerator.h
rename to media/gpu/v4l2_jpeg_decode_accelerator.h
index da61120..97f067c1 100644
--- a/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h
+++ b/media/gpu/v4l2_jpeg_decode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_V4L2_JPEG_DECODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_V4L2_JPEG_DECODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_V4L2_JPEG_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_V4L2_JPEG_DECODE_ACCELERATOR_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -18,16 +18,16 @@
 #include "base/memory/weak_ptr.h"
 #include "base/single_thread_task_runner.h"
 #include "base/threading/thread.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/shared_memory_region.h"
-#include "content/common/gpu/media/v4l2_device.h"
 #include "media/base/bitstream_buffer.h"
 #include "media/base/video_frame.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/shared_memory_region.h"
+#include "media/gpu/v4l2_device.h"
 #include "media/video/jpeg_decode_accelerator.h"
 
-namespace content {
+namespace media {
 
-class CONTENT_EXPORT V4L2JpegDecodeAccelerator
+class MEDIA_GPU_EXPORT V4L2JpegDecodeAccelerator
     : public media::JpegDecodeAccelerator {
  public:
   V4L2JpegDecodeAccelerator(
@@ -46,8 +46,8 @@
   struct BufferRecord {
     BufferRecord();
     ~BufferRecord();
-    void* address;          // mmap() address.
-    size_t length;          // mmap() length.
+    void* address;  // mmap() address.
+    size_t length;  // mmap() length.
 
     // Set true during QBUF and DQBUF. |address| will be accessed by hardware.
     bool at_device;
@@ -178,6 +178,6 @@
   DISALLOW_COPY_AND_ASSIGN(V4L2JpegDecodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_V4L2_JPEG_DECODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_V4L2_JPEG_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc b/media/gpu/v4l2_slice_video_decode_accelerator.cc
similarity index 95%
rename from content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
rename to media/gpu/v4l2_slice_video_decode_accelerator.cc
index 80c570b..1b4cce7b 100644
--- a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
+++ b/media/gpu/v4l2_slice_video_decode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
+#include "media/gpu/v4l2_slice_video_decode_accelerator.h"
 
 #include <errno.h>
 #include <fcntl.h>
@@ -24,9 +24,9 @@
 #include "base/memory/ptr_util.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/strings/stringprintf.h"
-#include "content/common/gpu/media/shared_memory_region.h"
 #include "media/base/bind_to_current_loop.h"
 #include "media/base/media_switches.h"
+#include "media/gpu/shared_memory_region.h"
 #include "ui/gl/gl_context.h"
 #include "ui/gl/scoped_binders.h"
 
@@ -39,12 +39,12 @@
     SetErrorState(x);                          \
   } while (0)
 
-#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str)           \
-  do {                                                                    \
-    if (device_->Ioctl(type, arg) != 0) {                                 \
-      PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << type_str;  \
-      return value;                                                       \
-    }                                                                     \
+#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str)          \
+  do {                                                                   \
+    if (device_->Ioctl(type, arg) != 0) {                                \
+      PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << type_str; \
+      return value;                                                      \
+    }                                                                    \
   } while (0)
 
 #define IOCTL_OR_ERROR_RETURN(type, arg) \
@@ -59,7 +59,7 @@
       PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << #type; \
   } while (0)
 
-namespace content {
+namespace media {
 
 // static
 const uint32_t V4L2SliceVideoDecodeAccelerator::supported_input_fourccs_[] = {
@@ -159,8 +159,7 @@
       address(nullptr),
       length(0),
       bytes_used(0),
-      at_device(false) {
-}
+      at_device(false) {}
 
 V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord()
     : at_device(false),
@@ -216,8 +215,7 @@
 V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef(
     EGLDisplay egl_display,
     EGLSyncKHR egl_sync)
-    : egl_display(egl_display), egl_sync(egl_sync) {
-}
+    : egl_display(egl_display), egl_sync(egl_sync) {}
 
 V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() {
   // We don't check for eglDestroySyncKHR failures, because if we get here
@@ -237,11 +235,9 @@
 V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord(
     bool cleared,
     const media::Picture& picture)
-    : cleared(cleared), picture(picture) {
-}
+    : cleared(cleared), picture(picture) {}
 
-V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {
-}
+V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
 
 class V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator
     : public H264Decoder::H264Accelerator {
@@ -327,8 +323,9 @@
 // This allows us to keep decoders oblivious of our implementation details.
 class V4L2H264Picture : public H264Picture {
  public:
-  V4L2H264Picture(const scoped_refptr<
-      V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface);
+  V4L2H264Picture(
+      const scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>&
+          dec_surface);
 
   V4L2H264Picture* AsV4L2H264Picture() override { return this; }
   scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
@@ -345,18 +342,18 @@
   DISALLOW_COPY_AND_ASSIGN(V4L2H264Picture);
 };
 
-V4L2H264Picture::V4L2H264Picture(const scoped_refptr<
-    V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface)
-    : dec_surface_(dec_surface) {
-}
+V4L2H264Picture::V4L2H264Picture(
+    const scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>&
+        dec_surface)
+    : dec_surface_(dec_surface) {}
 
-V4L2H264Picture::~V4L2H264Picture() {
-}
+V4L2H264Picture::~V4L2H264Picture() {}
 
 class V4L2VP8Picture : public VP8Picture {
  public:
-  V4L2VP8Picture(const scoped_refptr<
-      V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface);
+  V4L2VP8Picture(
+      const scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>&
+          dec_surface);
 
   V4L2VP8Picture* AsV4L2VP8Picture() override { return this; }
   scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
@@ -373,13 +370,12 @@
   DISALLOW_COPY_AND_ASSIGN(V4L2VP8Picture);
 };
 
-V4L2VP8Picture::V4L2VP8Picture(const scoped_refptr<
-    V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface)
-    : dec_surface_(dec_surface) {
-}
+V4L2VP8Picture::V4L2VP8Picture(
+    const scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>&
+        dec_surface)
+    : dec_surface_(dec_surface) {}
 
-V4L2VP8Picture::~V4L2VP8Picture() {
-}
+V4L2VP8Picture::~V4L2VP8Picture() {}
 
 V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
     const scoped_refptr<V4L2Device>& device,
@@ -517,7 +513,8 @@
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
   if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
     LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
-                  ", caps check failed: 0x" << std::hex << caps.capabilities;
+                  ", caps check failed: 0x"
+               << std::hex << caps.capabilities;
     return false;
   }
 
@@ -701,10 +698,8 @@
     buffer.m.planes = planes;
     buffer.length = input_planes_count_;
     IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
-    void* address = device_->Mmap(nullptr,
-                                  buffer.m.planes[0].length,
-                                  PROT_READ | PROT_WRITE,
-                                  MAP_SHARED,
+    void* address = device_->Mmap(nullptr, buffer.m.planes[0].length,
+                                  PROT_READ | PROT_WRITE, MAP_SHARED,
                                   buffer.m.planes[0].m.mem_offset);
     if (address == MAP_FAILED) {
       PLOG(ERROR) << "CreateInputBuffers(): mmap() failed";
@@ -863,13 +858,11 @@
 
   DVLOGF(2) << "buffer counts: "
             << "INPUT[" << decoder_input_queue_.size() << "]"
-            << " => DEVICE["
-            << free_input_buffers_.size() << "+"
-            << input_buffer_queued_count_ << "/"
-            << input_buffer_map_.size() << "]->["
-            << free_output_buffers_.size() << "+"
-            << output_buffer_queued_count_ << "/"
-            << output_buffer_map_.size() << "]"
+            << " => DEVICE[" << free_input_buffers_.size() << "+"
+            << input_buffer_queued_count_ << "/" << input_buffer_map_.size()
+            << "]->[" << free_output_buffers_.size() << "+"
+            << output_buffer_queued_count_ << "/" << output_buffer_map_.size()
+            << "]"
             << " => DISPLAYQ[" << decoder_display_queue_.size() << "]"
             << " => CLIENT[" << surfaces_at_display_.size() << "]";
 }
@@ -895,8 +888,9 @@
   }
 
   bool inserted =
-      surfaces_at_device_.insert(std::make_pair(dec_surface->output_record(),
-                                                dec_surface)).second;
+      surfaces_at_device_
+          .insert(std::make_pair(dec_surface->output_record(), dec_surface))
+          .second;
   DCHECK(inserted);
 
   if (old_inputs_queued == 0 && old_outputs_queued == 0)
@@ -958,8 +952,8 @@
     DCHECK(output_record.at_device);
     output_record.at_device = false;
     output_buffer_queued_count_--;
-    DVLOGF(3) << "Dequeued output=" << dqbuf.index
-              << " count " << output_buffer_queued_count_;
+    DVLOGF(3) << "Dequeued output=" << dqbuf.index << " count "
+              << output_buffer_queued_count_;
 
     V4L2DecodeSurfaceByOutputId::iterator it =
         surfaces_at_device_.find(dqbuf.index);
@@ -1032,8 +1026,9 @@
   input_record.input_id = -1;
   input_record.bytes_used = 0;
 
-  DCHECK_EQ(std::count(free_input_buffers_.begin(), free_input_buffers_.end(),
-            index), 0);
+  DCHECK_EQ(
+      std::count(free_input_buffers_.begin(), free_input_buffers_.end(), index),
+      0);
   free_input_buffers_.push_back(index);
 }
 
@@ -1047,7 +1042,8 @@
   DCHECK(!output_record.at_client);
 
   DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
-            index), 0);
+                       index),
+            0);
   free_output_buffers_.push_back(index);
 
   ScheduleDecodeBufferTaskIfNeeded();
@@ -1508,8 +1504,8 @@
 
   if (buffers.size() < req_buffer_count) {
     DLOG(ERROR) << "Failed to provide requested picture buffers. "
-                << "(Got " << buffers.size()
-                << ", requested " << req_buffer_count << ")";
+                << "(Got " << buffers.size() << ", requested "
+                << req_buffer_count << ")";
     NOTIFY_ERROR(INVALID_ARGUMENT);
     return;
   }
@@ -1996,8 +1992,7 @@
   DCHECK(v4l2_dec_);
 }
 
-V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::~V4L2H264Accelerator() {
-}
+V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::~V4L2H264Accelerator() {}
 
 scoped_refptr<H264Picture>
 V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::CreateH264Picture() {
@@ -2065,12 +2060,19 @@
   struct v4l2_ctrl_h264_sps v4l2_sps;
   memset(&v4l2_sps, 0, sizeof(v4l2_sps));
   v4l2_sps.constraint_set_flags =
-    sps->constraint_set0_flag ? V4L2_H264_SPS_CONSTRAINT_SET0_FLAG : 0 |
-    sps->constraint_set1_flag ? V4L2_H264_SPS_CONSTRAINT_SET1_FLAG : 0 |
-    sps->constraint_set2_flag ? V4L2_H264_SPS_CONSTRAINT_SET2_FLAG : 0 |
-    sps->constraint_set3_flag ? V4L2_H264_SPS_CONSTRAINT_SET3_FLAG : 0 |
-    sps->constraint_set4_flag ? V4L2_H264_SPS_CONSTRAINT_SET4_FLAG : 0 |
-    sps->constraint_set5_flag ? V4L2_H264_SPS_CONSTRAINT_SET5_FLAG : 0;
+      sps->constraint_set0_flag
+          ? V4L2_H264_SPS_CONSTRAINT_SET0_FLAG
+          : 0 | sps->constraint_set1_flag
+                ? V4L2_H264_SPS_CONSTRAINT_SET1_FLAG
+                : 0 | sps->constraint_set2_flag
+                      ? V4L2_H264_SPS_CONSTRAINT_SET2_FLAG
+                      : 0 | sps->constraint_set3_flag
+                            ? V4L2_H264_SPS_CONSTRAINT_SET3_FLAG
+                            : 0 | sps->constraint_set4_flag
+                                  ? V4L2_H264_SPS_CONSTRAINT_SET4_FLAG
+                                  : 0 | sps->constraint_set5_flag
+                                        ? V4L2_H264_SPS_CONSTRAINT_SET5_FLAG
+                                        : 0;
 #define SPS_TO_V4L2SPS(a) v4l2_sps.a = sps->a
   SPS_TO_V4L2SPS(profile_idc);
   SPS_TO_V4L2SPS(level_idc);
@@ -2086,7 +2088,7 @@
   SPS_TO_V4L2SPS(num_ref_frames_in_pic_order_cnt_cycle);
 
   static_assert(arraysize(v4l2_sps.offset_for_ref_frame) ==
-                arraysize(sps->offset_for_ref_frame),
+                    arraysize(sps->offset_for_ref_frame),
                 "offset_for_ref_frame arrays must be same size");
   for (size_t i = 0; i < arraysize(v4l2_sps.offset_for_ref_frame); ++i)
     v4l2_sps.offset_for_ref_frame[i] = sps->offset_for_ref_frame[i];
@@ -2160,13 +2162,13 @@
   struct v4l2_ctrl_h264_scaling_matrix v4l2_scaling_matrix;
   memset(&v4l2_scaling_matrix, 0, sizeof(v4l2_scaling_matrix));
   static_assert(arraysize(v4l2_scaling_matrix.scaling_list_4x4) <=
-                arraysize(pps->scaling_list4x4) &&
-                arraysize(v4l2_scaling_matrix.scaling_list_4x4[0]) <=
-                arraysize(pps->scaling_list4x4[0]) &&
-                arraysize(v4l2_scaling_matrix.scaling_list_8x8) <=
-                arraysize(pps->scaling_list8x8) &&
-                arraysize(v4l2_scaling_matrix.scaling_list_8x8[0]) <=
-                arraysize(pps->scaling_list8x8[0]),
+                        arraysize(pps->scaling_list4x4) &&
+                    arraysize(v4l2_scaling_matrix.scaling_list_4x4[0]) <=
+                        arraysize(pps->scaling_list4x4[0]) &&
+                    arraysize(v4l2_scaling_matrix.scaling_list_8x8) <=
+                        arraysize(pps->scaling_list8x8) &&
+                    arraysize(v4l2_scaling_matrix.scaling_list_8x8[0]) <=
+                        arraysize(pps->scaling_list8x8[0]),
                 "scaling_lists must be of correct size");
   for (size_t i = 0; i < arraysize(v4l2_scaling_matrix.scaling_list_4x4); ++i) {
     for (size_t j = 0; j < arraysize(v4l2_scaling_matrix.scaling_list_4x4[i]);
@@ -2423,8 +2425,7 @@
   DCHECK(v4l2_dec_);
 }
 
-V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::~V4L2VP8Accelerator() {
-}
+V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::~V4L2VP8Accelerator() {}
 
 scoped_refptr<VP8Picture>
 V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::CreateVP8Picture() {
@@ -2506,8 +2507,7 @@
                        vp8_entropy_hdr.y_mode_probs);
   ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->uv_mode_probs,
                        vp8_entropy_hdr.uv_mode_probs);
-  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs,
-                       vp8_entropy_hdr.mv_probs);
+  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs, vp8_entropy_hdr.mv_probs);
 }
 
 bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode(
@@ -2565,10 +2565,11 @@
   v4l2_frame_hdr.num_dct_parts = frame_hdr->num_of_dct_partitions;
 
   static_assert(arraysize(v4l2_frame_hdr.dct_part_sizes) ==
-                arraysize(frame_hdr->dct_partition_sizes),
+                    arraysize(frame_hdr->dct_partition_sizes),
                 "DCT partition size arrays must have equal number of elements");
   for (size_t i = 0; i < frame_hdr->num_of_dct_partitions &&
-       i < arraysize(v4l2_frame_hdr.dct_part_sizes); ++i)
+                     i < arraysize(v4l2_frame_hdr.dct_part_sizes);
+       ++i)
     v4l2_frame_hdr.dct_part_sizes[i] = frame_hdr->dct_partition_sizes[i];
 
   scoped_refptr<V4L2DecodeSurface> dec_surface =
@@ -2682,8 +2683,9 @@
       output_buffer_map_[dec_surface->output_record()];
 
   bool inserted =
-      surfaces_at_display_.insert(std::make_pair(output_record.picture_id,
-                                                 dec_surface)).second;
+      surfaces_at_display_
+          .insert(std::make_pair(output_record.picture_id, dec_surface))
+          .second;
   DCHECK(inserted);
 
   DCHECK(!output_record.at_client);
@@ -2809,4 +2811,4 @@
                                             supported_input_fourccs_);
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h b/media/gpu/v4l2_slice_video_decode_accelerator.h
similarity index 96%
rename from content/common/gpu/media/v4l2_slice_video_decode_accelerator.h
rename to media/gpu/v4l2_slice_video_decode_accelerator.h
index f917e1a4..4760ae12 100644
--- a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h
+++ b/media/gpu/v4l2_slice_video_decode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
 
 #include <linux/videodev2.h>
 #include <stddef.h>
@@ -20,20 +20,20 @@
 #include "base/memory/weak_ptr.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/thread.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
-#include "content/common/gpu/media/h264_decoder.h"
-#include "content/common/gpu/media/v4l2_device.h"
-#include "content/common/gpu/media/vp8_decoder.h"
+#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
+#include "media/gpu/h264_decoder.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/v4l2_device.h"
+#include "media/gpu/vp8_decoder.h"
 #include "media/video/video_decode_accelerator.h"
 
-namespace content {
+namespace media {
 
 // An implementation of VideoDecodeAccelerator that utilizes the V4L2 slice
 // level codec API for decoding. The slice level API provides only a low-level
 // decoding functionality and requires userspace to provide support for parsing
 // the input stream and managing decoder state across frames.
-class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
+class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
     : public media::VideoDecodeAccelerator {
  public:
   class V4L2DecodeSurface;
@@ -64,7 +64,7 @@
   media::VideoPixelFormat GetOutputFormat() const override;
 
   static media::VideoDecodeAccelerator::SupportedProfiles
-      GetSupportedProfiles();
+  GetSupportedProfiles();
 
  private:
   class V4L2H264Accelerator;
@@ -228,7 +228,6 @@
   // Process pending events, if any.
   void ProcessPendingEventsIfNeeded();
 
-
   // Allocate V4L2 buffers and assign them to |buffers| provided by the client
   // via AssignPictureBuffers() on decoder thread.
   void AssignPictureBuffersTask(
@@ -468,6 +467,6 @@
 class V4L2H264Picture;
 class V4L2VP8Picture;
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/v4l2_stub_header.fragment b/media/gpu/v4l2_stub_header.fragment
similarity index 100%
rename from content/common/gpu/media/v4l2_stub_header.fragment
rename to media/gpu/v4l2_stub_header.fragment
diff --git a/content/common/gpu/media/v4l2_video_decode_accelerator.cc b/media/gpu/v4l2_video_decode_accelerator.cc
similarity index 93%
rename from content/common/gpu/media/v4l2_video_decode_accelerator.cc
rename to media/gpu/v4l2_video_decode_accelerator.cc
index 596c027..77d9f293 100644
--- a/content/common/gpu/media/v4l2_video_decode_accelerator.cc
+++ b/media/gpu/v4l2_video_decode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
+#include "media/gpu/v4l2_video_decode_accelerator.h"
 
 #include <dlfcn.h>
 #include <errno.h>
@@ -21,10 +21,10 @@
 #include "base/thread_task_runner_handle.h"
 #include "base/trace_event/trace_event.h"
 #include "build/build_config.h"
-#include "content/common/gpu/media/shared_memory_region.h"
 #include "media/base/bind_to_current_loop.h"
 #include "media/base/media_switches.h"
 #include "media/filters/h264_parser.h"
+#include "media/gpu/shared_memory_region.h"
 #include "ui/gfx/geometry/rect.h"
 #include "ui/gl/gl_context.h"
 #include "ui/gl/scoped_binders.h"
@@ -35,13 +35,13 @@
     SetErrorState(x);                          \
   } while (0)
 
-#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str)        \
-  do {                                                                 \
-    if (device_->Ioctl(type, arg) != 0) {                              \
-      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << type_str;   \
-      NOTIFY_ERROR(PLATFORM_FAILURE);                                  \
-      return value;                                                    \
-    }                                                                  \
+#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str)      \
+  do {                                                               \
+    if (device_->Ioctl(type, arg) != 0) {                            \
+      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << type_str; \
+      NOTIFY_ERROR(PLATFORM_FAILURE);                                \
+      return value;                                                  \
+    }                                                                \
   } while (0)
 
 #define IOCTL_OR_ERROR_RETURN(type, arg) \
@@ -50,13 +50,13 @@
 #define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
   IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
 
-#define IOCTL_OR_LOG_ERROR(type, arg)                              \
-  do {                                                             \
-    if (device_->Ioctl(type, arg) != 0)                            \
-      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << #type;  \
+#define IOCTL_OR_LOG_ERROR(type, arg)                             \
+  do {                                                            \
+    if (device_->Ioctl(type, arg) != 0)                           \
+      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << #type; \
   } while (0)
 
-namespace content {
+namespace media {
 
 // static
 const uint32_t V4L2VideoDecodeAccelerator::supported_input_fourccs_[] = {
@@ -110,11 +110,9 @@
   }
 }
 
-V4L2VideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef(
-    EGLDisplay egl_display, EGLSyncKHR egl_sync)
-    : egl_display(egl_display),
-      egl_sync(egl_sync) {
-}
+V4L2VideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef(EGLDisplay egl_display,
+                                                         EGLSyncKHR egl_sync)
+    : egl_display(egl_display), egl_sync(egl_sync) {}
 
 V4L2VideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() {
   // We don't check for eglDestroySyncKHR failures, because if we get here
@@ -125,23 +123,16 @@
 }
 
 V4L2VideoDecodeAccelerator::InputRecord::InputRecord()
-    : at_device(false),
-      address(NULL),
-      length(0),
-      bytes_used(0),
-      input_id(-1) {
-}
+    : at_device(false), address(NULL), length(0), bytes_used(0), input_id(-1) {}
 
-V4L2VideoDecodeAccelerator::InputRecord::~InputRecord() {
-}
+V4L2VideoDecodeAccelerator::InputRecord::~InputRecord() {}
 
 V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord()
     : state(kFree),
       egl_image(EGL_NO_IMAGE_KHR),
       egl_sync(EGL_NO_SYNC_KHR),
       picture_id(-1),
-      cleared(false) {
-}
+      cleared(false) {}
 
 V4L2VideoDecodeAccelerator::OutputRecord::~OutputRecord() {}
 
@@ -267,7 +258,8 @@
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
   if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
     LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
-        ", caps check failed: 0x" << std::hex << caps.capabilities;
+                  ", caps check failed: 0x"
+               << std::hex << caps.capabilities;
     return false;
   }
 
@@ -297,10 +289,9 @@
 
   // StartDevicePoll will NOTIFY_ERROR on failure, so IgnoreResult is fine here.
   decoder_thread_.message_loop()->PostTask(
-      FROM_HERE,
-      base::Bind(
-          base::IgnoreResult(&V4L2VideoDecodeAccelerator::StartDevicePoll),
-          base::Unretained(this)));
+      FROM_HERE, base::Bind(base::IgnoreResult(
+                                &V4L2VideoDecodeAccelerator::StartDevicePoll),
+                            base::Unretained(this)));
 
   return true;
 }
@@ -320,9 +311,9 @@
   }
 
   // DecodeTask() will take care of running a DecodeBufferTask().
-  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-      &V4L2VideoDecodeAccelerator::DecodeTask, base::Unretained(this),
-      bitstream_buffer));
+  decoder_thread_.message_loop()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DecodeTask,
+                            base::Unretained(this), bitstream_buffer));
 }
 
 void V4L2VideoDecodeAccelerator::AssignPictureBuffers(
@@ -335,8 +326,8 @@
 
   if (buffers.size() < req_buffer_count) {
     LOG(ERROR) << "AssignPictureBuffers(): Failed to provide requested picture"
-                  " buffers. (Got " << buffers.size()
-               << ", requested " << req_buffer_count << ")";
+                  " buffers. (Got "
+               << buffers.size() << ", requested " << req_buffer_count << ")";
     NOTIFY_ERROR(INVALID_ARGUMENT);
     return;
   }
@@ -356,8 +347,8 @@
   // Allocate the output buffers.
   struct v4l2_requestbuffers reqbufs;
   memset(&reqbufs, 0, sizeof(reqbufs));
-  reqbufs.count  = buffers.size();
-  reqbufs.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  reqbufs.count = buffers.size();
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
   reqbufs.memory = V4L2_MEMORY_MMAP;
   IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
 
@@ -475,23 +466,26 @@
 
   std::unique_ptr<EGLSyncKHRRef> egl_sync_ref(
       new EGLSyncKHRRef(egl_display_, egl_sync));
-  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-      &V4L2VideoDecodeAccelerator::ReusePictureBufferTask,
-      base::Unretained(this), picture_buffer_id, base::Passed(&egl_sync_ref)));
+  decoder_thread_.message_loop()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ReusePictureBufferTask,
+                            base::Unretained(this), picture_buffer_id,
+                            base::Passed(&egl_sync_ref)));
 }
 
 void V4L2VideoDecodeAccelerator::Flush() {
   DVLOG(3) << "Flush()";
   DCHECK(child_task_runner_->BelongsToCurrentThread());
-  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-      &V4L2VideoDecodeAccelerator::FlushTask, base::Unretained(this)));
+  decoder_thread_.message_loop()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::FlushTask,
+                            base::Unretained(this)));
 }
 
 void V4L2VideoDecodeAccelerator::Reset() {
   DVLOG(3) << "Reset()";
   DCHECK(child_task_runner_->BelongsToCurrentThread());
-  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-      &V4L2VideoDecodeAccelerator::ResetTask, base::Unretained(this)));
+  decoder_thread_.message_loop()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ResetTask,
+                            base::Unretained(this)));
 }
 
 void V4L2VideoDecodeAccelerator::Destroy() {
@@ -504,8 +498,9 @@
 
   // If the decoder thread is running, destroy using posted task.
   if (decoder_thread_.IsRunning()) {
-    decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-        &V4L2VideoDecodeAccelerator::DestroyTask, base::Unretained(this)));
+    decoder_thread_.message_loop()->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DestroyTask,
+                              base::Unretained(this)));
     pictures_assigned_.Signal();
     // DestroyTask() will cause the decoder_thread_ to flush all tasks.
     decoder_thread_.Stop();
@@ -797,14 +792,15 @@
     buffers_to_decode++;
   if (decoder_decode_buffer_tasks_scheduled_ < buffers_to_decode) {
     decoder_decode_buffer_tasks_scheduled_++;
-    decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-        &V4L2VideoDecodeAccelerator::DecodeBufferTask,
-        base::Unretained(this)));
+    decoder_thread_.message_loop()->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DecodeBufferTask,
+                              base::Unretained(this)));
   }
 }
 
-bool V4L2VideoDecodeAccelerator::DecodeBufferInitial(
-    const void* data, size_t size, size_t* endpos) {
+bool V4L2VideoDecodeAccelerator::DecodeBufferInitial(const void* data,
+                                                     size_t size,
+                                                     size_t* endpos) {
   DVLOG(3) << "DecodeBufferInitial(): data=" << data << ", size=" << size;
   DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
   DCHECK_NE(decoder_state_, kUninitialized);
@@ -853,8 +849,8 @@
   return true;
 }
 
-bool V4L2VideoDecodeAccelerator::DecodeBufferContinue(
-    const void* data, size_t size) {
+bool V4L2VideoDecodeAccelerator::DecodeBufferContinue(const void* data,
+                                                      size_t size) {
   DVLOG(3) << "DecodeBufferContinue(): data=" << data << ", size=" << size;
   DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
   DCHECK_EQ(decoder_state_, kDecoding);
@@ -865,8 +861,8 @@
           (decoder_partial_frame_pending_ || FlushInputFrame()));
 }
 
-bool V4L2VideoDecodeAccelerator::AppendToInputFrame(
-    const void* data, size_t size) {
+bool V4L2VideoDecodeAccelerator::AppendToInputFrame(const void* data,
+                                                    size_t size) {
   DVLOG(3) << "AppendToInputFrame()";
   DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
   DCHECK_NE(decoder_state_, kUninitialized);
@@ -916,8 +912,7 @@
   }
 
   // Copy in to the buffer.
-  InputRecord& input_record =
-      input_buffer_map_[decoder_current_input_buffer_];
+  InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_];
   if (size > input_record.length - input_record.bytes_used) {
     LOG(ERROR) << "AppendToInputFrame(): over-size frame, erroring";
     NOTIFY_ERROR(UNREADABLE_INPUT);
@@ -941,8 +936,7 @@
   if (decoder_current_input_buffer_ == -1)
     return true;
 
-  InputRecord& input_record =
-      input_buffer_map_[decoder_current_input_buffer_];
+  InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_];
   DCHECK_NE(input_record.input_id, -1);
   DCHECK(input_record.input_id != kFlushBufferId ||
          input_record.bytes_used == 0);
@@ -1013,22 +1007,17 @@
   DCHECK(device_poll_thread_.message_loop());
   // Queue the DevicePollTask() now.
   device_poll_thread_.message_loop()->PostTask(
-      FROM_HERE,
-      base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask,
-                 base::Unretained(this),
-                 poll_device));
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask,
+                            base::Unretained(this), poll_device));
 
   DVLOG(1) << "ServiceDeviceTask(): buffer counts: DEC["
-           << decoder_input_queue_.size() << "->"
-           << input_ready_queue_.size() << "] => DEVICE["
-           << free_input_buffers_.size() << "+"
-           << input_buffer_queued_count_ << "/"
-           << input_buffer_map_.size() << "->"
-           << free_output_buffers_.size() << "+"
-           << output_buffer_queued_count_ << "/"
-           << output_buffer_map_.size() << "] => PROCESSOR["
-           << image_processor_bitstream_buffer_ids_.size() << "] => CLIENT["
-           << decoder_frames_at_client_ << "]";
+           << decoder_input_queue_.size() << "->" << input_ready_queue_.size()
+           << "] => DEVICE[" << free_input_buffers_.size() << "+"
+           << input_buffer_queued_count_ << "/" << input_buffer_map_.size()
+           << "->" << free_output_buffers_.size() << "+"
+           << output_buffer_queued_count_ << "/" << output_buffer_map_.size()
+           << "] => PROCESSOR[" << image_processor_bitstream_buffer_ids_.size()
+           << "] => CLIENT[" << decoder_frames_at_client_ << "]";
 
   ScheduleDecodeBufferTaskIfNeeded();
   if (resolution_change_pending)
@@ -1123,7 +1112,7 @@
     struct v4l2_plane planes[1];
     memset(&dqbuf, 0, sizeof(dqbuf));
     memset(planes, 0, sizeof(planes));
-    dqbuf.type   = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+    dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
     dqbuf.memory = V4L2_MEMORY_MMAP;
     dqbuf.m.planes = planes;
     dqbuf.length = 1;
@@ -1154,7 +1143,7 @@
         new v4l2_plane[output_planes_count_]);
     memset(&dqbuf, 0, sizeof(dqbuf));
     memset(planes.get(), 0, sizeof(struct v4l2_plane) * output_planes_count_);
-    dqbuf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
     dqbuf.memory = V4L2_MEMORY_MMAP;
     dqbuf.m.planes = planes.get();
     dqbuf.length = output_planes_count_;
@@ -1231,19 +1220,19 @@
   struct v4l2_plane qbuf_plane;
   memset(&qbuf, 0, sizeof(qbuf));
   memset(&qbuf_plane, 0, sizeof(qbuf_plane));
-  qbuf.index                 = buffer;
-  qbuf.type                  = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
-  qbuf.timestamp.tv_sec      = input_record.input_id;
-  qbuf.memory                = V4L2_MEMORY_MMAP;
-  qbuf.m.planes              = &qbuf_plane;
+  qbuf.index = buffer;
+  qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  qbuf.timestamp.tv_sec = input_record.input_id;
+  qbuf.memory = V4L2_MEMORY_MMAP;
+  qbuf.m.planes = &qbuf_plane;
   qbuf.m.planes[0].bytesused = input_record.bytes_used;
-  qbuf.length                = 1;
+  qbuf.length = 1;
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
   input_ready_queue_.pop();
   input_record.at_device = true;
   input_buffer_queued_count_++;
   DVLOG(3) << "EnqueueInputRecord(): enqueued input_id="
-           << input_record.input_id << " size="  << input_record.bytes_used;
+           << input_record.input_id << " size=" << input_record.bytes_used;
   return true;
 }
 
@@ -1279,11 +1268,11 @@
   std::unique_ptr<struct v4l2_plane[]> qbuf_planes(
       new v4l2_plane[output_planes_count_]);
   memset(&qbuf, 0, sizeof(qbuf));
-  memset(
-      qbuf_planes.get(), 0, sizeof(struct v4l2_plane) * output_planes_count_);
-  qbuf.index    = buffer;
-  qbuf.type     = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
-  qbuf.memory   = V4L2_MEMORY_MMAP;
+  memset(qbuf_planes.get(), 0,
+         sizeof(struct v4l2_plane) * output_planes_count_);
+  qbuf.index = buffer;
+  qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  qbuf.memory = V4L2_MEMORY_MMAP;
   qbuf.m.planes = qbuf_planes.get();
   qbuf.length = output_planes_count_;
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
@@ -1479,8 +1468,9 @@
   // jobs will early-out in the kResetting state.
   decoder_state_ = kResetting;
   SendPictureReady();  // Send all pending PictureReady.
-  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-      &V4L2VideoDecodeAccelerator::ResetDoneTask, base::Unretained(this)));
+  decoder_thread_.message_loop()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ResetDoneTask,
+                            base::Unretained(this)));
 }
 
 void V4L2VideoDecodeAccelerator::ResetDoneTask() {
@@ -1554,10 +1544,9 @@
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return false;
   }
-  device_poll_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-      &V4L2VideoDecodeAccelerator::DevicePollTask,
-      base::Unretained(this),
-      0));
+  device_poll_thread_.message_loop()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask,
+                            base::Unretained(this), 0));
 
   return true;
 }
@@ -1715,9 +1704,9 @@
 
   // All processing should happen on ServiceDeviceTask(), since we shouldn't
   // touch decoder state from this thread.
-  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-      &V4L2VideoDecodeAccelerator::ServiceDeviceTask,
-      base::Unretained(this), event_pending));
+  decoder_thread_.message_loop()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ServiceDeviceTask,
+                            base::Unretained(this), event_pending));
 }
 
 void V4L2VideoDecodeAccelerator::NotifyError(Error error) {
@@ -1741,9 +1730,9 @@
   // decoder thread isn't running.
   if (decoder_thread_.message_loop() != NULL &&
       decoder_thread_.message_loop() != base::MessageLoop::current()) {
-    decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-        &V4L2VideoDecodeAccelerator::SetErrorState,
-        base::Unretained(this), error));
+    decoder_thread_.message_loop()->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::SetErrorState,
+                              base::Unretained(this), error));
     return;
   }
 
@@ -1861,8 +1850,8 @@
 
   struct v4l2_requestbuffers reqbufs;
   memset(&reqbufs, 0, sizeof(reqbufs));
-  reqbufs.count  = kInputBufferCount;
-  reqbufs.type   = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  reqbufs.count = kInputBufferCount;
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
   reqbufs.memory = V4L2_MEMORY_MMAP;
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
   input_buffer_map_.resize(reqbufs.count);
@@ -1874,17 +1863,15 @@
     struct v4l2_buffer buffer;
     memset(&buffer, 0, sizeof(buffer));
     memset(planes, 0, sizeof(planes));
-    buffer.index    = i;
-    buffer.type     = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
-    buffer.memory   = V4L2_MEMORY_MMAP;
+    buffer.index = i;
+    buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+    buffer.memory = V4L2_MEMORY_MMAP;
     buffer.m.planes = planes;
-    buffer.length   = 1;
+    buffer.length = 1;
     IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
-    void* address = device_->Mmap(NULL,
-                                  buffer.m.planes[0].length,
-                                  PROT_READ | PROT_WRITE,
-                                  MAP_SHARED,
-                                  buffer.m.planes[0].m.mem_offset);
+    void* address =
+        device_->Mmap(NULL, buffer.m.planes[0].length, PROT_READ | PROT_WRITE,
+                      MAP_SHARED, buffer.m.planes[0].m.mem_offset);
     if (address == MAP_FAILED) {
       PLOG(ERROR) << "CreateInputBuffers(): mmap() failed";
       return false;
@@ -2158,9 +2145,9 @@
   }
 
   // Finish resolution change on decoder thread.
-  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
-      &V4L2VideoDecodeAccelerator::FinishResolutionChange,
-      base::Unretained(this)));
+  decoder_thread_.message_loop()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::FinishResolutionChange,
+                            base::Unretained(this)));
 }
 
 void V4L2VideoDecodeAccelerator::SendPictureReady() {
@@ -2258,4 +2245,4 @@
   NOTIFY_ERROR(PLATFORM_FAILURE);
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/v4l2_video_decode_accelerator.h b/media/gpu/v4l2_video_decode_accelerator.h
similarity index 92%
rename from content/common/gpu/media/v4l2_video_decode_accelerator.h
rename to media/gpu/v4l2_video_decode_accelerator.h
index 6c37a71..2ef9a9b 100644
--- a/content/common/gpu/media/v4l2_video_decode_accelerator.h
+++ b/media/gpu/v4l2_video_decode_accelerator.h
@@ -6,8 +6,8 @@
 // that utilizes hardware video decoders, which expose Video4Linux 2 API
 // (http://linuxtv.org/downloads/v4l-dvb-apis/).
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_V4L2_VIDEO_DECODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_V4L2_VIDEO_DECODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -22,22 +22,21 @@
 #include "base/memory/ref_counted.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/thread.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
-#include "content/common/gpu/media/v4l2_device.h"
-#include "content/common/gpu/media/v4l2_image_processor.h"
 #include "media/base/limits.h"
 #include "media/base/video_decoder_config.h"
+#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/v4l2_device.h"
+#include "media/gpu/v4l2_image_processor.h"
 #include "media/video/picture.h"
 #include "media/video/video_decode_accelerator.h"
 #include "ui/gfx/geometry/size.h"
 #include "ui/gl/gl_bindings.h"
 
 namespace media {
-class H264Parser;
-}  // namespace media
 
-namespace content {
+class H264Parser;
+
 // This class handles video accelerators directly through a V4L2 device exported
 // by the hardware blocks.
 //
@@ -85,7 +84,7 @@
 // Resolution change: V4L2VDA destroy image processor when destroying output
 //   buffrers. We cannot drop any frame during resolution change. So V4L2VDA
 //   should destroy output buffers after image processor returns all the frames.
-class CONTENT_EXPORT V4L2VideoDecodeAccelerator
+class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
     : public media::VideoDecodeAccelerator {
  public:
   V4L2VideoDecodeAccelerator(
@@ -112,7 +111,7 @@
   media::VideoPixelFormat GetOutputFormat() const override;
 
   static media::VideoDecodeAccelerator::SupportedProfiles
-      GetSupportedProfiles();
+  GetSupportedProfiles();
 
  private:
   // These are rather subjectively tuned.
@@ -133,14 +132,14 @@
 
   // Internal state of the decoder.
   enum State {
-    kUninitialized,      // Initialize() not yet called.
-    kInitialized,        // Initialize() returned true; ready to start decoding.
-    kDecoding,           // DecodeBufferInitial() successful; decoding frames.
-    kResetting,          // Presently resetting.
-    kAfterReset,         // After Reset(), ready to start decoding again.
-    kChangingResolution, // Performing resolution change, all remaining
-                         // pre-change frames decoded and processed.
-    kError,              // Error in kDecoding state.
+    kUninitialized,  // Initialize() not yet called.
+    kInitialized,    // Initialize() returned true; ready to start decoding.
+    kDecoding,       // DecodeBufferInitial() successful; decoding frames.
+    kResetting,      // Presently resetting.
+    kAfterReset,     // After Reset(), ready to start decoding again.
+    kChangingResolution,  // Performing resolution change, all remaining
+                          // pre-change frames decoded and processed.
+    kError,               // Error in kDecoding state.
   };
 
   enum OutputRecordState {
@@ -168,11 +167,11 @@
   struct InputRecord {
     InputRecord();
     ~InputRecord();
-    bool at_device;         // held by device.
-    void* address;          // mmap() address.
-    size_t length;          // mmap() length.
-    off_t bytes_used;       // bytes filled in the mmap() segment.
-    int32_t input_id;       // triggering input_id as given to Decode().
+    bool at_device;    // held by device.
+    void* address;     // mmap() address.
+    size_t length;     // mmap() length.
+    off_t bytes_used;  // bytes filled in the mmap() segment.
+    int32_t input_id;  // triggering input_id as given to Decode().
   };
 
   // Record for output buffers.
@@ -402,7 +401,7 @@
   // Got a reset request while we were performing resolution change.
   bool resolution_change_reset_pending_;
   // Input queue for decoder_thread_: BitstreamBuffers in.
-  std::queue<linked_ptr<BitstreamBufferRef> > decoder_input_queue_;
+  std::queue<linked_ptr<BitstreamBufferRef>> decoder_input_queue_;
   // For H264 decode, hardware requires that we send it frame-sized chunks.
   // We'll need to parse the stream.
   std::unique_ptr<media::H264Parser> decoder_h264_parser_;
@@ -513,6 +512,6 @@
   DISALLOW_COPY_AND_ASSIGN(V4L2VideoDecodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_V4L2_VIDEO_DECODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/v4l2_video_encode_accelerator.cc b/media/gpu/v4l2_video_encode_accelerator.cc
similarity index 93%
rename from content/common/gpu/media/v4l2_video_encode_accelerator.cc
rename to media/gpu/v4l2_video_encode_accelerator.cc
index 414118207..8c3afa4 100644
--- a/content/common/gpu/media/v4l2_video_encode_accelerator.cc
+++ b/media/gpu/v4l2_video_encode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/v4l2_video_encode_accelerator.h"
+#include "media/gpu/v4l2_video_encode_accelerator.h"
 
 #include <fcntl.h>
 #include <linux/videodev2.h>
@@ -20,9 +20,9 @@
 #include "base/numerics/safe_conversions.h"
 #include "base/thread_task_runner_handle.h"
 #include "base/trace_event/trace_event.h"
-#include "content/common/gpu/media/shared_memory_region.h"
 #include "media/base/bind_to_current_loop.h"
 #include "media/base/bitstream_buffer.h"
+#include "media/gpu/shared_memory_region.h"
 
 #define NOTIFY_ERROR(x)                        \
   do {                                         \
@@ -30,13 +30,13 @@
     SetErrorState(x);                          \
   } while (0)
 
-#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str)        \
-  do {                                                                 \
-    if (device_->Ioctl(type, arg) != 0) {                              \
-      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << type_str;   \
-      NOTIFY_ERROR(kPlatformFailureError);                             \
-      return value;                                                    \
-    }                                                                  \
+#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str)      \
+  do {                                                               \
+    if (device_->Ioctl(type, arg) != 0) {                            \
+      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << type_str; \
+      NOTIFY_ERROR(kPlatformFailureError);                           \
+      return value;                                                  \
+    }                                                                \
   } while (0)
 
 #define IOCTL_OR_ERROR_RETURN(type, arg) \
@@ -45,13 +45,13 @@
 #define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
   IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
 
-#define IOCTL_OR_LOG_ERROR(type, arg)                              \
-  do {                                                             \
-    if (device_->Ioctl(type, arg) != 0)                            \
-      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << #type;  \
+#define IOCTL_OR_LOG_ERROR(type, arg)                             \
+  do {                                                            \
+    if (device_->Ioctl(type, arg) != 0)                           \
+      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << #type; \
   } while (0)
 
-namespace content {
+namespace media {
 
 struct V4L2VideoEncodeAccelerator::BitstreamBufferRef {
   BitstreamBufferRef(int32_t id, std::unique_ptr<SharedMemoryRegion> shm)
@@ -60,18 +60,14 @@
   const std::unique_ptr<SharedMemoryRegion> shm;
 };
 
-V4L2VideoEncodeAccelerator::InputRecord::InputRecord() : at_device(false) {
-}
+V4L2VideoEncodeAccelerator::InputRecord::InputRecord() : at_device(false) {}
 
-V4L2VideoEncodeAccelerator::InputRecord::~InputRecord() {
-}
+V4L2VideoEncodeAccelerator::InputRecord::~InputRecord() {}
 
 V4L2VideoEncodeAccelerator::OutputRecord::OutputRecord()
-    : at_device(false), address(NULL), length(0) {
-}
+    : at_device(false), address(NULL), length(0) {}
 
-V4L2VideoEncodeAccelerator::OutputRecord::~OutputRecord() {
-}
+V4L2VideoEncodeAccelerator::OutputRecord::~OutputRecord() {}
 
 V4L2VideoEncodeAccelerator::ImageProcessorInputRecord::
     ImageProcessorInputRecord()
@@ -136,7 +132,8 @@
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
   if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
     LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP: "
-                  "caps check failed: 0x" << std::hex << caps.capabilities;
+                  "caps check failed: 0x"
+               << std::hex << caps.capabilities;
     return false;
   }
 
@@ -249,11 +246,8 @@
     }
   } else {
     encoder_thread_.message_loop()->PostTask(
-        FROM_HERE,
-        base::Bind(&V4L2VideoEncodeAccelerator::EncodeTask,
-                   base::Unretained(this),
-                   frame,
-                   force_keyframe));
+        FROM_HERE, base::Bind(&V4L2VideoEncodeAccelerator::EncodeTask,
+                              base::Unretained(this), frame, force_keyframe));
   }
 }
 
@@ -279,8 +273,7 @@
   encoder_thread_.message_loop()->PostTask(
       FROM_HERE,
       base::Bind(&V4L2VideoEncodeAccelerator::UseOutputBitstreamBufferTask,
-                 base::Unretained(this),
-                 base::Passed(&buffer_ref)));
+                 base::Unretained(this), base::Passed(&buffer_ref)));
 }
 
 void V4L2VideoEncodeAccelerator::RequestEncodingParametersChange(
@@ -294,9 +287,7 @@
       FROM_HERE,
       base::Bind(
           &V4L2VideoEncodeAccelerator::RequestEncodingParametersChangeTask,
-          base::Unretained(this),
-          bitrate,
-          framerate));
+          base::Unretained(this), bitrate, framerate));
 }
 
 void V4L2VideoEncodeAccelerator::Destroy() {
@@ -313,9 +304,8 @@
   // If the encoder thread is running, destroy using posted task.
   if (encoder_thread_.IsRunning()) {
     encoder_thread_.message_loop()->PostTask(
-        FROM_HERE,
-        base::Bind(&V4L2VideoEncodeAccelerator::DestroyTask,
-                   base::Unretained(this)));
+        FROM_HERE, base::Bind(&V4L2VideoEncodeAccelerator::DestroyTask,
+                              base::Unretained(this)));
     // DestroyTask() will put the encoder into kError state and cause all tasks
     // to no-op.
     encoder_thread_.Stop();
@@ -342,8 +332,8 @@
   memset(&fmtdesc, 0, sizeof(fmtdesc));
   fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
   for (; device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0; ++fmtdesc.index) {
-    device_->GetSupportedResolution(fmtdesc.pixelformat,
-                                    &min_resolution, &profile.max_resolution);
+    device_->GetSupportedResolution(fmtdesc.pixelformat, &min_resolution,
+                                    &profile.max_resolution);
     switch (fmtdesc.pixelformat) {
       case V4L2_PIX_FMT_H264:
         profile.profile = media::H264PROFILE_MAIN;
@@ -521,20 +511,15 @@
   DCHECK(device_poll_thread_.message_loop());
   // Queue the DevicePollTask() now.
   device_poll_thread_.message_loop()->PostTask(
-      FROM_HERE,
-      base::Bind(&V4L2VideoEncodeAccelerator::DevicePollTask,
-                 base::Unretained(this),
-                 poll_device));
+      FROM_HERE, base::Bind(&V4L2VideoEncodeAccelerator::DevicePollTask,
+                            base::Unretained(this), poll_device));
 
-  DVLOG(2) << __func__ << ": buffer counts: ENC["
-           << encoder_input_queue_.size() << "] => DEVICE["
-           << free_input_buffers_.size() << "+"
-           << input_buffer_queued_count_ << "/"
-           << input_buffer_map_.size() << "->"
-           << free_output_buffers_.size() << "+"
-           << output_buffer_queued_count_ << "/"
-           << output_buffer_map_.size() << "] => OUT["
-           << encoder_output_queue_.size() << "]";
+  DVLOG(2) << __func__ << ": buffer counts: ENC[" << encoder_input_queue_.size()
+           << "] => DEVICE[" << free_input_buffers_.size() << "+"
+           << input_buffer_queued_count_ << "/" << input_buffer_map_.size()
+           << "->" << free_output_buffers_.size() << "+"
+           << output_buffer_queued_count_ << "/" << output_buffer_map_.size()
+           << "] => OUT[" << encoder_output_queue_.size() << "]";
 }
 
 void V4L2VideoEncodeAccelerator::Enqueue() {
@@ -673,8 +658,9 @@
     }
 
     DVLOG(3) << "Dequeue(): returning "
-                "bitstream_buffer_id=" << output_record.buffer_ref->id
-             << ", size=" << output_size << ", key_frame=" << key_frame;
+                "bitstream_buffer_id="
+             << output_record.buffer_ref->id << ", size=" << output_size
+             << ", key_frame=" << key_frame;
     child_task_runner_->PostTask(
         FROM_HERE,
         base::Bind(&Client::BitstreamBufferReady, client_,
@@ -706,9 +692,9 @@
 
   DCHECK_EQ(device_input_format_, frame->format());
   for (size_t i = 0; i < input_planes_count_; ++i) {
-    qbuf.m.planes[i].bytesused =
-        base::checked_cast<__u32>(media::VideoFrame::PlaneSize(
-            frame->format(), i, input_allocated_size_).GetArea());
+    qbuf.m.planes[i].bytesused = base::checked_cast<__u32>(
+        media::VideoFrame::PlaneSize(frame->format(), i, input_allocated_size_)
+            .GetArea());
 
     switch (input_memory_type_) {
       case V4L2_MEMORY_USERPTR:
@@ -784,10 +770,8 @@
   // Enqueue a poll task with no devices to poll on -- it will wait only on the
   // interrupt fd.
   device_poll_thread_.message_loop()->PostTask(
-      FROM_HERE,
-      base::Bind(&V4L2VideoEncodeAccelerator::DevicePollTask,
-                 base::Unretained(this),
-                 false));
+      FROM_HERE, base::Bind(&V4L2VideoEncodeAccelerator::DevicePollTask,
+                            base::Unretained(this), false));
 
   return true;
 }
@@ -855,9 +839,8 @@
   // All processing should happen on ServiceDeviceTask(), since we shouldn't
   // touch encoder state from this thread.
   encoder_thread_.message_loop()->PostTask(
-      FROM_HERE,
-      base::Bind(&V4L2VideoEncodeAccelerator::ServiceDeviceTask,
-                 base::Unretained(this)));
+      FROM_HERE, base::Bind(&V4L2VideoEncodeAccelerator::ServiceDeviceTask,
+                            base::Unretained(this)));
 }
 
 void V4L2VideoEncodeAccelerator::NotifyError(Error error) {
@@ -1214,11 +1197,9 @@
     buffer.m.planes = planes;
     buffer.length = arraysize(planes);
     IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
-    void* address = device_->Mmap(NULL,
-                                  buffer.m.planes[0].length,
-                                  PROT_READ | PROT_WRITE,
-                                  MAP_SHARED,
-                                  buffer.m.planes[0].m.mem_offset);
+    void* address =
+        device_->Mmap(NULL, buffer.m.planes[0].length, PROT_READ | PROT_WRITE,
+                      MAP_SHARED, buffer.m.planes[0].m.mem_offset);
     if (address == MAP_FAILED) {
       PLOG(ERROR) << "CreateOutputBuffers(): mmap() failed";
       return false;
@@ -1269,4 +1250,4 @@
   free_output_buffers_.clear();
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/v4l2_video_encode_accelerator.h b/media/gpu/v4l2_video_encode_accelerator.h
similarity index 94%
rename from content/common/gpu/media/v4l2_video_encode_accelerator.h
rename to media/gpu/v4l2_video_encode_accelerator.h
index 4a8d842..a57735b 100644
--- a/content/common/gpu/media/v4l2_video_encode_accelerator.h
+++ b/media/gpu/v4l2_video_encode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_V4L2_VIDEO_ENCODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_V4L2_VIDEO_ENCODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_V4L2_VIDEO_ENCODE_ACCELERATOR_H_
+#define MEDIA_GPU_V4L2_VIDEO_ENCODE_ACCELERATOR_H_
 
 #include <linux/videodev2.h>
 #include <stddef.h>
@@ -19,9 +19,9 @@
 #include "base/memory/weak_ptr.h"
 #include "base/threading/thread.h"
 #include "base/time/time.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/v4l2_device.h"
-#include "content/common/gpu/media/v4l2_image_processor.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/v4l2_device.h"
+#include "media/gpu/v4l2_image_processor.h"
 #include "media/video/video_encode_accelerator.h"
 #include "ui/gfx/geometry/size.h"
 
@@ -31,7 +31,7 @@
 
 }  // namespace media
 
-namespace content {
+namespace media {
 
 // This class handles video encode acceleration by interfacing with a V4L2
 // device exposed by the codec hardware driver. The threading model of this
@@ -40,7 +40,7 @@
 // This class may try to instantiate and use a V4L2ImageProcessor for input
 // format conversion, if the input format requested via Initialize() is not
 // accepted by the hardware codec.
-class CONTENT_EXPORT V4L2VideoEncodeAccelerator
+class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
     : public media::VideoEncodeAccelerator {
  public:
   explicit V4L2VideoEncodeAccelerator(const scoped_refptr<V4L2Device>& device);
@@ -56,8 +56,7 @@
                   Client* client) override;
   void Encode(const scoped_refptr<media::VideoFrame>& frame,
               bool force_keyframe) override;
-  void UseOutputBitstreamBuffer(const media::BitstreamBuffer& buffer)
-      override;
+  void UseOutputBitstreamBuffer(const media::BitstreamBuffer& buffer) override;
   void RequestEncodingParametersChange(uint32_t bitrate,
                                        uint32_t framerate) override;
   void Destroy() override;
@@ -262,7 +261,7 @@
 
   // Bitstream buffers ready to be used to return encoded output, as a LIFO
   // since we don't care about ordering.
-  std::vector<linked_ptr<BitstreamBufferRef> > encoder_output_queue_;
+  std::vector<linked_ptr<BitstreamBufferRef>> encoder_output_queue_;
 
   // Image processor, if one is in use.
   std::unique_ptr<V4L2ImageProcessor> image_processor_;
@@ -298,6 +297,6 @@
   DISALLOW_COPY_AND_ASSIGN(V4L2VideoEncodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_V4L2_VIDEO_ENCODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_V4L2_VIDEO_ENCODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/va.sigs b/media/gpu/va.sigs
similarity index 100%
rename from content/common/gpu/media/va.sigs
rename to media/gpu/va.sigs
diff --git a/content/common/gpu/media/va_drm.sigs b/media/gpu/va_drm.sigs
similarity index 100%
rename from content/common/gpu/media/va_drm.sigs
rename to media/gpu/va_drm.sigs
diff --git a/content/common/gpu/media/va_stub_header.fragment b/media/gpu/va_stub_header.fragment
similarity index 100%
rename from content/common/gpu/media/va_stub_header.fragment
rename to media/gpu/va_stub_header.fragment
diff --git a/content/common/gpu/media/va_surface.h b/media/gpu/va_surface.h
similarity index 92%
rename from content/common/gpu/media/va_surface.h
rename to media/gpu/va_surface.h
index 41c7880..c73bc57 100644
--- a/content/common/gpu/media/va_surface.h
+++ b/media/gpu/va_surface.h
@@ -5,17 +5,17 @@
 // This file contains the definition of VASurface class, used for decoding by
 // VaapiVideoDecodeAccelerator and VaapiH264Decoder.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VA_SURFACE_H_
-#define CONTENT_COMMON_GPU_MEDIA_VA_SURFACE_H_
+#ifndef MEDIA_GPU_VA_SURFACE_H_
+#define MEDIA_GPU_VA_SURFACE_H_
 
 #include "base/callback.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "content/common/content_export.h"
+#include "media/gpu/media_gpu_export.h"
 #include "third_party/libva/va/va.h"
 #include "ui/gfx/geometry/size.h"
 
-namespace content {
+namespace media {
 
 // A VA-API-specific decode surface used by VaapiH264Decoder to decode into
 // and use as reference for decoding other surfaces. It is also handed by the
@@ -83,7 +83,8 @@
 //                                       v
 //                       VaapiWrapper frees VASurfaceID.
 //
-class CONTENT_EXPORT VASurface : public base::RefCountedThreadSafe<VASurface> {
+class MEDIA_GPU_EXPORT VASurface
+    : public base::RefCountedThreadSafe<VASurface> {
  public:
   // Provided by user, will be called when all references to the surface
   // are released.
@@ -94,9 +95,7 @@
             unsigned int format,
             const ReleaseCB& release_cb);
 
-  VASurfaceID id() {
-    return va_surface_id_;
-  }
+  VASurfaceID id() { return va_surface_id_; }
 
   const gfx::Size& size() const { return size_; }
   unsigned int format() const { return format_; }
@@ -113,6 +112,6 @@
   DISALLOW_COPY_AND_ASSIGN(VASurface);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VA_SURFACE_H_
+#endif  // MEDIA_GPU_VA_SURFACE_H_
diff --git a/content/common/gpu/media/va_x11.sigs b/media/gpu/va_x11.sigs
similarity index 100%
rename from content/common/gpu/media/va_x11.sigs
rename to media/gpu/va_x11.sigs
diff --git a/content/common/gpu/media/vaapi_drm_picture.cc b/media/gpu/vaapi_drm_picture.cc
similarity index 94%
rename from content/common/gpu/media/vaapi_drm_picture.cc
rename to media/gpu/vaapi_drm_picture.cc
index ab5a4f2..30539df5 100644
--- a/content/common/gpu/media/vaapi_drm_picture.cc
+++ b/media/gpu/vaapi_drm_picture.cc
@@ -3,9 +3,9 @@
 // found in the LICENSE file.
 
 #include "base/file_descriptor_posix.h"
-#include "content/common/gpu/media/va_surface.h"
-#include "content/common/gpu/media/vaapi_drm_picture.h"
-#include "content/common/gpu/media/vaapi_wrapper.h"
+#include "media/gpu/va_surface.h"
+#include "media/gpu/vaapi_drm_picture.h"
+#include "media/gpu/vaapi_wrapper.h"
 #include "third_party/libva/va/drm/va_drm.h"
 #include "third_party/libva/va/va.h"
 #include "ui/gfx/gpu_memory_buffer.h"
@@ -23,7 +23,7 @@
 
 }  // namespace
 
-namespace content {
+namespace media {
 
 VaapiDrmPicture::VaapiDrmPicture(
     const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
diff --git a/content/common/gpu/media/vaapi_drm_picture.h b/media/gpu/vaapi_drm_picture.h
similarity index 86%
rename from content/common/gpu/media/vaapi_drm_picture.h
rename to media/gpu/vaapi_drm_picture.h
index 7f5fc8a1..b23811f4 100644
--- a/content/common/gpu/media/vaapi_drm_picture.h
+++ b/media/gpu/vaapi_drm_picture.h
@@ -6,15 +6,15 @@
 // Ozone window system used by VaapiVideoDecodeAccelerator to produce
 // output pictures.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_DRM_PICTURE_H_
-#define CONTENT_COMMON_GPU_MEDIA_VAAPI_DRM_PICTURE_H_
+#ifndef MEDIA_GPU_VAAPI_DRM_PICTURE_H_
+#define MEDIA_GPU_VAAPI_DRM_PICTURE_H_
 
 #include <stdint.h>
 
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/memory/weak_ptr.h"
-#include "content/common/gpu/media/vaapi_picture.h"
+#include "media/gpu/vaapi_picture.h"
 #include "ui/gfx/buffer_types.h"
 #include "ui/gfx/geometry/size.h"
 
@@ -26,7 +26,7 @@
 class NativePixmap;
 }
 
-namespace content {
+namespace media {
 
 class VaapiWrapper;
 
@@ -65,6 +65,6 @@
   DISALLOW_COPY_AND_ASSIGN(VaapiDrmPicture);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VAAPI_DRM_PICTURE_H_
+#endif  // MEDIA_GPU_VAAPI_DRM_PICTURE_H_
diff --git a/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc b/media/gpu/vaapi_jpeg_decode_accelerator.cc
similarity index 95%
rename from content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc
rename to media/gpu/vaapi_jpeg_decode_accelerator.cc
index fc25553..d288573 100644
--- a/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc
+++ b/media/gpu/vaapi_jpeg_decode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vaapi_jpeg_decode_accelerator.h"
+#include "media/gpu/vaapi_jpeg_decode_accelerator.h"
 
 #include <stddef.h>
 #include <string.h>
@@ -15,26 +15,25 @@
 #include "base/metrics/histogram.h"
 #include "base/thread_task_runner_handle.h"
 #include "base/trace_event/trace_event.h"
-#include "content/common/gpu/media/shared_memory_region.h"
-#include "content/common/gpu/media/vaapi_picture.h"
 #include "gpu/ipc/service/gpu_channel.h"
 #include "media/base/video_frame.h"
 #include "media/filters/jpeg_parser.h"
+#include "media/gpu/shared_memory_region.h"
+#include "media/gpu/vaapi_picture.h"
 #include "third_party/libyuv/include/libyuv.h"
 
-namespace content {
+namespace media {
 
 namespace {
 // UMA errors that the VaapiJpegDecodeAccelerator class reports.
 enum VAJDADecoderFailure {
   VAAPI_ERROR = 0,
-  // UMA requires that max must be greater than 1.
-  VAJDA_DECODER_FAILURES_MAX = 2,
+  VAJDA_DECODER_FAILURES_MAX,
 };
 
 static void ReportToUMA(VAJDADecoderFailure failure) {
   UMA_HISTOGRAM_ENUMERATION("Media.VAJDA.DecoderFailure", failure,
-                            VAJDA_DECODER_FAILURES_MAX);
+                            VAJDA_DECODER_FAILURES_MAX + 1);
 }
 
 static unsigned int VaSurfaceFormatForJpeg(
@@ -86,8 +85,7 @@
       shm(std::move(shm)),
       video_frame(video_frame) {}
 
-VaapiJpegDecodeAccelerator::DecodeRequest::~DecodeRequest() {
-}
+VaapiJpegDecodeAccelerator::DecodeRequest::~DecodeRequest() {}
 
 void VaapiJpegDecodeAccelerator::NotifyError(int32_t bitstream_buffer_id,
                                              Error error) {
@@ -321,4 +319,4 @@
   return VaapiWrapper::IsJpegDecodeSupported();
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h b/media/gpu/vaapi_jpeg_decode_accelerator.h
similarity index 89%
rename from content/common/gpu/media/vaapi_jpeg_decode_accelerator.h
rename to media/gpu/vaapi_jpeg_decode_accelerator.h
index eaf54e86..353d657 100644
--- a/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h
+++ b/media/gpu/vaapi_jpeg_decode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_JPEG_DECODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_VAAPI_JPEG_DECODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_VAAPI_JPEG_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_VAAPI_JPEG_DECODE_ACCELERATOR_H_
 
 #include <stdint.h>
 
@@ -16,14 +16,14 @@
 #include "base/synchronization/lock.h"
 #include "base/threading/non_thread_safe.h"
 #include "base/threading/thread.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/shared_memory_region.h"
-#include "content/common/gpu/media/vaapi_jpeg_decoder.h"
-#include "content/common/gpu/media/vaapi_wrapper.h"
 #include "media/base/bitstream_buffer.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/shared_memory_region.h"
+#include "media/gpu/vaapi_jpeg_decoder.h"
+#include "media/gpu/vaapi_wrapper.h"
 #include "media/video/jpeg_decode_accelerator.h"
 
-namespace content {
+namespace media {
 
 // Class to provide JPEG decode acceleration for Intel systems with hardware
 // support for it, and on which libva is available.
@@ -33,7 +33,7 @@
 // ChildThread.  A few methods on it are called on the decoder thread which is
 // stopped during |this->Destroy()|, so any tasks posted to the decoder thread
 // can assume |*this| is still alive.  See |weak_this_| below for more details.
-class CONTENT_EXPORT VaapiJpegDecodeAccelerator
+class MEDIA_GPU_EXPORT VaapiJpegDecodeAccelerator
     : public media::JpegDecodeAccelerator {
  public:
   VaapiJpegDecodeAccelerator(
@@ -117,6 +117,6 @@
   DISALLOW_COPY_AND_ASSIGN(VaapiJpegDecodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VAAPI_JPEG_DECODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_VAAPI_JPEG_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/vaapi_jpeg_decoder.cc b/media/gpu/vaapi_jpeg_decoder.cc
similarity index 75%
rename from content/common/gpu/media/vaapi_jpeg_decoder.cc
rename to media/gpu/vaapi_jpeg_decoder.cc
index 78c4663..8c8afcb 100644
--- a/content/common/gpu/media/vaapi_jpeg_decoder.cc
+++ b/media/gpu/vaapi_jpeg_decoder.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vaapi_jpeg_decoder.h"
+#include "media/gpu/vaapi_jpeg_decoder.h"
 
 #include <stddef.h>
 #include <string.h>
@@ -16,80 +16,69 @@
 // K.3.3.1 "Specification of typical tables for DC difference coding"
 media::JpegHuffmanTable
     kDefaultDcTable[media::kJpegMaxHuffmanTableNumBaseline] = {
-      // luminance DC coefficients
-      {
-       true,
-       {0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0},
-       {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b},
-      },
-      // chrominance DC coefficients
-      {
-       true,
-       {0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0},
-       {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb},
-      },
+        // luminance DC coefficients
+        {
+            true,
+            {0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0},
+            {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a,
+             0x0b},
+        },
+        // chrominance DC coefficients
+        {
+            true,
+            {0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0},
+            {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb},
+        },
 };
 
 // K.3.3.2 "Specification of typical tables for AC coefficient coding"
 media::JpegHuffmanTable
     kDefaultAcTable[media::kJpegMaxHuffmanTableNumBaseline] = {
-      // luminance AC coefficients
-      {
-       true,
-       {0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d},
-       {0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
-        0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
-        0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
-        0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
-        0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
-        0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
-        0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
-        0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
-        0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
-        0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
-        0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
-        0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
-        0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
-        0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
-        0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
-        0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
-        0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
-        0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
-        0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
-        0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
-        0xf9, 0xfa},
-      },
-      // chrominance AC coefficients
-      {
-       true,
-       {0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77},
-       {0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
-        0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
-        0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
-        0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
-        0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
-        0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
-        0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
-        0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
-        0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
-        0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
-        0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
-        0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
-        0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
-        0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
-        0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
-        0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
-        0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
-        0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
-        0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
-        0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
-        0xf9, 0xfa},
-      },
+        // luminance AC coefficients
+        {
+            true,
+            {0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d},
+            {0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41,
+             0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91,
+             0xa1, 0x08, 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24,
+             0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a,
+             0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38,
+             0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53,
+             0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66,
+             0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+             0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93,
+             0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
+             0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+             0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9,
+             0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1,
+             0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2,
+             0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa},
+        },
+        // chrominance AC coefficients
+        {
+            true,
+            {0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77},
+            {0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12,
+             0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14,
+             0x42, 0x91, 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, 0x15,
+             0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 0xf1, 0x17,
+             0x18, 0x19, 0x1a, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37,
+             0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a,
+             0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65,
+             0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+             0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a,
+             0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3,
+             0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5,
+             0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+             0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9,
+             0xda, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf2,
+             0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa},
+        },
 };
 
 }  // namespace
 
-namespace content {
+namespace media {
 
 // VAAPI only support subset of JPEG profiles. This function determines a given
 // parsed JPEG result is supported or not.
@@ -304,4 +293,4 @@
   return true;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vaapi_jpeg_decoder.h b/media/gpu/vaapi_jpeg_decoder.h
similarity index 78%
rename from content/common/gpu/media/vaapi_jpeg_decoder.h
rename to media/gpu/vaapi_jpeg_decoder.h
index 3040016..1db06820 100644
--- a/content/common/gpu/media/vaapi_jpeg_decoder.h
+++ b/media/gpu/vaapi_jpeg_decoder.h
@@ -2,18 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_JPEG_DECODER_H_
-#define CONTENT_COMMON_GPU_MEDIA_VAAPI_JPEG_DECODER_H_
+#ifndef MEDIA_GPU_VAAPI_JPEG_DECODER_H_
+#define MEDIA_GPU_VAAPI_JPEG_DECODER_H_
 
 #include "base/macros.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/vaapi_wrapper.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/vaapi_wrapper.h"
 
 namespace media {
 struct JpegParseResult;
 }  // namespace media
 
-namespace content {
+namespace media {
 
 // A JPEG decoder that utilizes VA-API hardware video decode acceleration on
 // Intel systems. Provides functionality to allow plugging VAAPI HW
@@ -22,7 +22,7 @@
 // Clients of this class are expected to manage VA surfaces created via
 // VaapiWrapper, parse JPEG picture via media::ParseJpegPicture, and then pass
 // them to this class.
-class CONTENT_EXPORT VaapiJpegDecoder {
+class MEDIA_GPU_EXPORT VaapiJpegDecoder {
  public:
   // Decode a JPEG picture. It will fill VA-API parameters and call
   // corresponding VA-API methods according to parsed JPEG result
@@ -40,6 +40,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(VaapiJpegDecoder);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VAAPI_JPEG_DECODER_H_
+#endif  // MEDIA_GPU_VAAPI_JPEG_DECODER_H_
diff --git a/content/common/gpu/media/vaapi_jpeg_decoder_unittest.cc b/media/gpu/vaapi_jpeg_decoder_unittest.cc
similarity index 95%
rename from content/common/gpu/media/vaapi_jpeg_decoder_unittest.cc
rename to media/gpu/vaapi_jpeg_decoder_unittest.cc
index 47e35ff..d0d45e2 100644
--- a/content/common/gpu/media/vaapi_jpeg_decoder_unittest.cc
+++ b/media/gpu/vaapi_jpeg_decoder_unittest.cc
@@ -18,12 +18,12 @@
 #include "base/md5.h"
 #include "base/path_service.h"
 #include "base/strings/string_piece.h"
-#include "content/common/gpu/media/vaapi_jpeg_decoder.h"
 #include "media/base/test_data_util.h"
 #include "media/base/video_frame.h"
 #include "media/filters/jpeg_parser.h"
+#include "media/gpu/vaapi_jpeg_decoder.h"
 
-namespace content {
+namespace media {
 namespace {
 
 const char* kTestFilename = "pixel-1280x720.jpg";
@@ -131,11 +131,11 @@
 }
 
 }  // namespace
-}  // namespace content
+}  // namespace media
 
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
   base::AtExitManager exit_manager;
-  content::VaapiWrapper::PreSandboxInitialization();
+  media::VaapiWrapper::PreSandboxInitialization();
   return RUN_ALL_TESTS();
 }
diff --git a/content/common/gpu/media/vaapi_picture.cc b/media/gpu/vaapi_picture.cc
similarity index 82%
rename from content/common/gpu/media/vaapi_picture.cc
rename to media/gpu/vaapi_picture.cc
index cdf8c355..9e20590 100644
--- a/content/common/gpu/media/vaapi_picture.cc
+++ b/media/gpu/vaapi_picture.cc
@@ -2,18 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vaapi_picture.h"
-#include "content/common/gpu/media/vaapi_wrapper.h"
+#include "media/gpu/vaapi_picture.h"
+#include "media/gpu/vaapi_wrapper.h"
 #include "ui/gl/gl_bindings.h"
 #include "ui/gl/gl_implementation.h"
 
 #if defined(USE_X11)
-#include "content/common/gpu/media/vaapi_tfp_picture.h"
+#include "media/gpu/vaapi_tfp_picture.h"
 #elif defined(USE_OZONE)
-#include "content/common/gpu/media/vaapi_drm_picture.h"
+#include "media/gpu/vaapi_drm_picture.h"
 #endif
 
-namespace content {
+namespace media {
 
 // static
 linked_ptr<VaapiPicture> VaapiPicture::CreatePicture(
@@ -50,4 +50,4 @@
 #endif
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vaapi_picture.h b/media/gpu/vaapi_picture.h
similarity index 89%
rename from content/common/gpu/media/vaapi_picture.h
rename to media/gpu/vaapi_picture.h
index 4bd51e1..53b4fa4 100644
--- a/content/common/gpu/media/vaapi_picture.h
+++ b/media/gpu/vaapi_picture.h
@@ -7,8 +7,8 @@
 // (X11/Ozone) and used by VaapiVideoDecodeAccelerator to produce
 // output pictures.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_PICTURE_H_
-#define CONTENT_COMMON_GPU_MEDIA_VAAPI_PICTURE_H_
+#ifndef MEDIA_GPU_VAAPI_PICTURE_H_
+#define MEDIA_GPU_VAAPI_PICTURE_H_
 
 #include <stdint.h>
 
@@ -16,14 +16,14 @@
 #include "base/memory/linked_ptr.h"
 #include "base/memory/ref_counted.h"
 #include "base/threading/non_thread_safe.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
+#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
 #include "ui/gfx/geometry/size.h"
 
 namespace gl {
 class GLImage;
 }
 
-namespace content {
+namespace media {
 
 class VASurface;
 class VaapiWrapper;
@@ -80,6 +80,6 @@
   DISALLOW_COPY_AND_ASSIGN(VaapiPicture);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VAAPI_PICTURE_H_
+#endif  // MEDIA_GPU_VAAPI_PICTURE_H_
diff --git a/content/common/gpu/media/vaapi_tfp_picture.cc b/media/gpu/vaapi_tfp_picture.cc
similarity index 91%
rename from content/common/gpu/media/vaapi_tfp_picture.cc
rename to media/gpu/vaapi_tfp_picture.cc
index 074ba98e..b492a57 100644
--- a/content/common/gpu/media/vaapi_tfp_picture.cc
+++ b/media/gpu/vaapi_tfp_picture.cc
@@ -2,15 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/va_surface.h"
-#include "content/common/gpu/media/vaapi_tfp_picture.h"
-#include "content/common/gpu/media/vaapi_wrapper.h"
+#include "media/gpu/va_surface.h"
+#include "media/gpu/vaapi_tfp_picture.h"
+#include "media/gpu/vaapi_wrapper.h"
 #include "ui/gfx/x/x11_types.h"
 #include "ui/gl/gl_bindings.h"
 #include "ui/gl/gl_image_glx.h"
 #include "ui/gl/scoped_binders.h"
 
-namespace content {
+namespace media {
 
 VaapiTFPPicture::VaapiTFPPicture(
     const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
@@ -77,4 +77,4 @@
   return nullptr;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vaapi_tfp_picture.h b/media/gpu/vaapi_tfp_picture.h
similarity index 84%
rename from content/common/gpu/media/vaapi_tfp_picture.h
rename to media/gpu/vaapi_tfp_picture.h
index 5ef35653..1c9cdeb 100644
--- a/content/common/gpu/media/vaapi_tfp_picture.h
+++ b/media/gpu/vaapi_tfp_picture.h
@@ -6,14 +6,14 @@
 // X11 window system used by VaapiVideoDecodeAccelerator to produce
 // output pictures.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_TFP_PICTURE_H_
-#define CONTENT_COMMON_GPU_MEDIA_VAAPI_TFP_PICTURE_H_
+#ifndef MEDIA_GPU_VAAPI_TFP_PICTURE_H_
+#define MEDIA_GPU_VAAPI_TFP_PICTURE_H_
 
 #include <stdint.h>
 
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "content/common/gpu/media/vaapi_picture.h"
+#include "media/gpu/vaapi_picture.h"
 #include "ui/gfx/geometry/size.h"
 #include "ui/gl/gl_bindings.h"
 
@@ -25,7 +25,7 @@
 class GLImageGLX;
 }
 
-namespace content {
+namespace media {
 
 class VaapiWrapper;
 
@@ -58,6 +58,6 @@
   DISALLOW_COPY_AND_ASSIGN(VaapiTFPPicture);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VAAPI_TFP_PICTURE_H_
+#endif  // MEDIA_GPU_VAAPI_TFP_PICTURE_H_
diff --git a/content/common/gpu/media/vaapi_video_decode_accelerator.cc b/media/gpu/vaapi_video_decode_accelerator.cc
similarity index 92%
rename from content/common/gpu/media/vaapi_video_decode_accelerator.cc
rename to media/gpu/vaapi_video_decode_accelerator.cc
index 8ab7995..8f1c2bc 100644
--- a/content/common/gpu/media/vaapi_video_decode_accelerator.cc
+++ b/media/gpu/vaapi_video_decode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
+#include "media/gpu/vaapi_video_decode_accelerator.h"
 
 #include <string.h>
 
@@ -16,41 +16,40 @@
 #include "base/strings/string_util.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/trace_event/trace_event.h"
-#include "content/common/gpu/media/accelerated_video_decoder.h"
-#include "content/common/gpu/media/h264_decoder.h"
-#include "content/common/gpu/media/vaapi_picture.h"
-#include "content/common/gpu/media/vp8_decoder.h"
-#include "content/common/gpu/media/vp9_decoder.h"
 #include "gpu/ipc/service/gpu_channel.h"
 #include "media/base/bind_to_current_loop.h"
+#include "media/gpu/accelerated_video_decoder.h"
+#include "media/gpu/h264_decoder.h"
+#include "media/gpu/vaapi_picture.h"
+#include "media/gpu/vp8_decoder.h"
+#include "media/gpu/vp9_decoder.h"
 #include "media/video/picture.h"
 #include "third_party/libva/va/va_dec_vp8.h"
 #include "ui/gl/gl_bindings.h"
 #include "ui/gl/gl_image.h"
 
-namespace content {
+namespace media {
 
 namespace {
 // UMA errors that the VaapiVideoDecodeAccelerator class reports.
 enum VAVDADecoderFailure {
   VAAPI_ERROR = 0,
-  // UMA requires that max must be greater than 1.
-  VAVDA_DECODER_FAILURES_MAX = 2,
+  VAVDA_DECODER_FAILURES_MAX,
 };
 }
 
 static void ReportToUMA(VAVDADecoderFailure failure) {
   UMA_HISTOGRAM_ENUMERATION("Media.VAVDA.DecoderFailure", failure,
-                            VAVDA_DECODER_FAILURES_MAX);
+                            VAVDA_DECODER_FAILURES_MAX + 1);
 }
 
-#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret)  \
-  do {                                                              \
-    if (!(result)) {                                                \
-      LOG(ERROR) << log;                                            \
-      NotifyError(error_code);                                      \
-      return ret;                                                   \
-    }                                                               \
+#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
+  do {                                                             \
+    if (!(result)) {                                               \
+      LOG(ERROR) << log;                                           \
+      NotifyError(error_code);                                     \
+      return ret;                                                  \
+    }                                                              \
   } while (0)
 
 class VaapiVideoDecodeAccelerator::VaapiDecodeSurface
@@ -75,13 +74,13 @@
     const scoped_refptr<VASurface>& va_surface)
     : bitstream_id_(bitstream_id), va_surface_(va_surface) {}
 
-VaapiVideoDecodeAccelerator::VaapiDecodeSurface::~VaapiDecodeSurface() {
-}
+VaapiVideoDecodeAccelerator::VaapiDecodeSurface::~VaapiDecodeSurface() {}
 
 class VaapiH264Picture : public H264Picture {
  public:
-  VaapiH264Picture(const scoped_refptr<
-      VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface);
+  VaapiH264Picture(
+      const scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>&
+          dec_surface);
 
   VaapiH264Picture* AsVaapiH264Picture() override { return this; }
   scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() {
@@ -96,13 +95,12 @@
   DISALLOW_COPY_AND_ASSIGN(VaapiH264Picture);
 };
 
-VaapiH264Picture::VaapiH264Picture(const scoped_refptr<
-    VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface)
-    : dec_surface_(dec_surface) {
-}
+VaapiH264Picture::VaapiH264Picture(
+    const scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>&
+        dec_surface)
+    : dec_surface_(dec_surface) {}
 
-VaapiH264Picture::~VaapiH264Picture() {
-}
+VaapiH264Picture::~VaapiH264Picture() {}
 
 class VaapiVideoDecodeAccelerator::VaapiH264Accelerator
     : public H264Decoder::H264Accelerator {
@@ -152,8 +150,9 @@
 
 class VaapiVP8Picture : public VP8Picture {
  public:
-  VaapiVP8Picture(const scoped_refptr<
-      VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface);
+  VaapiVP8Picture(
+      const scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>&
+          dec_surface);
 
   VaapiVP8Picture* AsVaapiVP8Picture() override { return this; }
   scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() {
@@ -168,13 +167,12 @@
   DISALLOW_COPY_AND_ASSIGN(VaapiVP8Picture);
 };
 
-VaapiVP8Picture::VaapiVP8Picture(const scoped_refptr<
-    VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface)
-    : dec_surface_(dec_surface) {
-}
+VaapiVP8Picture::VaapiVP8Picture(
+    const scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>&
+        dec_surface)
+    : dec_surface_(dec_surface) {}
 
-VaapiVP8Picture::~VaapiVP8Picture() {
-}
+VaapiVP8Picture::~VaapiVP8Picture() {}
 
 class VaapiVideoDecodeAccelerator::VaapiVP8Accelerator
     : public VP8Decoder::VP8Accelerator {
@@ -260,20 +258,20 @@
 
 VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0) {}
 
-VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
-}
+VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {}
 
 void VaapiVideoDecodeAccelerator::NotifyError(Error error) {
   if (message_loop_ != base::MessageLoop::current()) {
     DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
-    message_loop_->PostTask(FROM_HERE, base::Bind(
-        &VaapiVideoDecodeAccelerator::NotifyError, weak_this_, error));
+    message_loop_->PostTask(
+        FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::NotifyError,
+                              weak_this_, error));
     return;
   }
 
   // Post Cleanup() as a task so we don't recursively acquire lock_.
-  message_loop_->PostTask(FROM_HERE, base::Bind(
-      &VaapiVideoDecodeAccelerator::Cleanup, weak_this_));
+  message_loop_->PostTask(
+      FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::Cleanup, weak_this_));
 
   LOG(ERROR) << "Notifying of error " << error;
   if (client_) {
@@ -400,8 +398,7 @@
 
   int32_t output_id = picture->picture_buffer_id();
 
-  TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface",
-               "input_id", input_id,
+  TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface", "input_id", input_id,
                "output_id", output_id);
 
   DVLOG(3) << "Outputting VASurface " << va_surface->id()
@@ -414,14 +411,13 @@
   // Notify the client a picture is ready to be displayed.
   ++num_frames_at_client_;
   TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);
-  DVLOG(4) << "Notifying output picture id " << output_id
-           << " for input "<< input_id << " is ready";
+  DVLOG(4) << "Notifying output picture id " << output_id << " for input "
+           << input_id << " is ready";
   // TODO(posciak): Use visible size from decoder here instead
   // (crbug.com/402760). Passing (0, 0) results in the client using the
   // visible size extracted from the container instead.
   if (client_)
-    client_->PictureReady(media::Picture(output_id, input_id,
-                                         gfx::Rect(0, 0),
+    client_->PictureReady(media::Picture(output_id, input_id, gfx::Rect(0, 0),
                                          picture->AllowOverlay()));
 }
 
@@ -452,7 +448,7 @@
     const media::BitstreamBuffer& bitstream_buffer) {
   DCHECK_EQ(message_loop_, base::MessageLoop::current());
   TRACE_EVENT1("Video Decoder", "MapAndQueueNewInputBuffer", "input_id",
-      bitstream_buffer.id());
+               bitstream_buffer.id());
 
   DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer.id()
            << " size: " << (int)bitstream_buffer.size();
@@ -506,7 +502,7 @@
       // already queued up. Otherwise will stop decoding.
       if (input_buffers_.empty())
         return false;
-      // else fallthrough
+    // else fallthrough
     case kDecoding:
     case kIdle:
       DCHECK(!input_buffers_.empty());
@@ -537,8 +533,8 @@
   int32_t id = curr_input_buffer_->id;
   curr_input_buffer_.reset();
   DVLOG(4) << "End of input buffer " << id;
-  message_loop_->PostTask(FROM_HERE, base::Bind(
-      &Client::NotifyEndOfBitstreamBuffer, client_, id));
+  message_loop_->PostTask(
+      FROM_HERE, base::Bind(&Client::NotifyEndOfBitstreamBuffer, client_, id));
 
   --num_stream_bufs_at_decoder_;
   TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
@@ -592,10 +588,11 @@
     switch (res) {
       case AcceleratedVideoDecoder::kAllocateNewSurfaces:
         DVLOG(1) << "Decoder requesting a new set of surfaces";
-        message_loop_->PostTask(FROM_HERE, base::Bind(
-            &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange, weak_this_,
-                decoder_->GetRequiredNumOfPictures(),
-                decoder_->GetPicSize()));
+        message_loop_->PostTask(
+            FROM_HERE,
+            base::Bind(&VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange,
+                       weak_this_, decoder_->GetRequiredNumOfPictures(),
+                       decoder_->GetPicSize()));
         // We'll get rescheduled once ProvidePictureBuffers() finishes.
         return;
 
@@ -654,8 +651,10 @@
     // as the result, but not all have executed yet. Post ourselves after them
     // to let them release surfaces.
     DVLOG(2) << "Awaiting pending output/surface release callbacks to finish";
-    message_loop_->PostTask(FROM_HERE, base::Bind(
-        &VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange, weak_this_));
+    message_loop_->PostTask(
+        FROM_HERE,
+        base::Bind(&VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange,
+                   weak_this_));
     return;
   }
 
@@ -673,8 +672,8 @@
   pictures_.clear();
 
   // And ask for a new set as requested.
-  DVLOG(1) << "Requesting " << requested_num_pics_ << " pictures of size: "
-           << requested_pic_size_.ToString();
+  DVLOG(1) << "Requesting " << requested_num_pics_
+           << " pictures of size: " << requested_pic_size_.ToString();
 
   message_loop_->PostTask(
       FROM_HERE,
@@ -710,7 +709,7 @@
       break;
 
     case kDecoding:
-      // Decoder already running, fallthrough.
+    // Decoder already running, fallthrough.
     case kResetting:
       // When resetting, allow accumulating bitstream buffers, so that
       // the client can queue after-seek-buffers while we are finishing with
@@ -718,8 +717,8 @@
       break;
 
     default:
-      RETURN_AND_NOTIFY_ON_FAILURE(false,
-          "Decode request from client in invalid state: " << state_,
+      RETURN_AND_NOTIFY_ON_FAILURE(
+          false, "Decode request from client in invalid state: " << state_,
           PLATFORM_FAILURE, );
       break;
   }
@@ -744,10 +743,11 @@
   while (!output_buffers_.empty())
     output_buffers_.pop();
 
-  RETURN_AND_NOTIFY_ON_FAILURE(
-      buffers.size() >= requested_num_pics_,
-      "Got an invalid number of picture buffers. (Got " << buffers.size()
-      << ", requested " << requested_num_pics_ << ")", INVALID_ARGUMENT, );
+  RETURN_AND_NOTIFY_ON_FAILURE(buffers.size() >= requested_num_pics_,
+                               "Got an invalid number of picture buffers. (Got "
+                                   << buffers.size() << ", requested "
+                                   << requested_num_pics_ << ")",
+                               INVALID_ARGUMENT, );
   DCHECK(requested_pic_size_ == buffers[0].size());
 
   std::vector<VASurfaceID> va_surface_ids;
@@ -820,8 +820,9 @@
   // Put the decoder in idle state, ready to resume.
   decoder_->Reset();
 
-  message_loop_->PostTask(FROM_HERE, base::Bind(
-      &VaapiVideoDecodeAccelerator::FinishFlush, weak_this_));
+  message_loop_->PostTask(
+      FROM_HERE,
+      base::Bind(&VaapiVideoDecodeAccelerator::FinishFlush, weak_this_));
 }
 
 void VaapiVideoDecodeAccelerator::Flush() {
@@ -859,8 +860,8 @@
 
   state_ = kIdle;
 
-  message_loop_->PostTask(FROM_HERE, base::Bind(
-      &Client::NotifyFlushDone, client_));
+  message_loop_->PostTask(FROM_HERE,
+                          base::Bind(&Client::NotifyFlushDone, client_));
 
   DVLOG(1) << "Flush finished";
 }
@@ -881,8 +882,9 @@
     ReturnCurrInputBuffer_Locked();
 
   // And let client know that we are done with reset.
-  message_loop_->PostTask(FROM_HERE, base::Bind(
-      &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
+  message_loop_->PostTask(
+      FROM_HERE,
+      base::Bind(&VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
 }
 
 void VaapiVideoDecodeAccelerator::Reset() {
@@ -896,9 +898,9 @@
 
   // Drop all remaining input buffers, if present.
   while (!input_buffers_.empty()) {
-    message_loop_->PostTask(FROM_HERE, base::Bind(
-        &Client::NotifyEndOfBitstreamBuffer, client_,
-        input_buffers_.front()->id));
+    message_loop_->PostTask(
+        FROM_HERE, base::Bind(&Client::NotifyEndOfBitstreamBuffer, client_,
+                              input_buffers_.front()->id));
     input_buffers_.pop();
   }
 
@@ -928,16 +930,17 @@
     // Decoder requested a new surface set while we were waiting for it to
     // finish the last DecodeTask, running at the time of Reset().
     // Let the surface set change finish first before resetting.
-    message_loop_->PostTask(FROM_HERE, base::Bind(
-        &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
+    message_loop_->PostTask(
+        FROM_HERE,
+        base::Bind(&VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
     return;
   }
 
   num_stream_bufs_at_decoder_ = 0;
   state_ = kIdle;
 
-  message_loop_->PostTask(FROM_HERE, base::Bind(
-      &Client::NotifyResetDone, client_));
+  message_loop_->PostTask(FROM_HERE,
+                          base::Bind(&Client::NotifyResetDone, client_));
 
   // The client might have given us new buffers via Decode() while we were
   // resetting and might be waiting for our move, and not call Decode() anymore
@@ -1057,8 +1060,7 @@
   DCHECK(vaapi_dec_);
 }
 
-VaapiVideoDecodeAccelerator::VaapiH264Accelerator::~VaapiH264Accelerator() {
-}
+VaapiVideoDecodeAccelerator::VaapiH264Accelerator::~VaapiH264Accelerator() {}
 
 scoped_refptr<H264Picture>
 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::CreateH264Picture() {
@@ -1157,8 +1159,7 @@
   pic_param.num_ref_frames = sps->max_num_ref_frames;
 
   if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType,
-                                    sizeof(pic_param),
-                                    &pic_param))
+                                    sizeof(pic_param), &pic_param))
     return false;
 
   VAIQMatrixBufferH264 iq_matrix_buf;
@@ -1187,8 +1188,7 @@
   }
 
   return vaapi_wrapper_->SubmitBuffer(VAIQMatrixBufferType,
-                                      sizeof(iq_matrix_buf),
-                                      &iq_matrix_buf);
+                                      sizeof(iq_matrix_buf), &iq_matrix_buf);
 }
 
 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitSlice(
@@ -1288,8 +1288,7 @@
   }
 
   if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType,
-                                    sizeof(slice_param),
-                                    &slice_param))
+                                    sizeof(slice_param), &slice_param))
     return false;
 
   // Can't help it, blame libva...
@@ -1390,8 +1389,7 @@
   DCHECK(vaapi_dec_);
 }
 
-VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::~VaapiVP8Accelerator() {
-}
+VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::~VaapiVP8Accelerator() {}
 
 scoped_refptr<VP8Picture>
 VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::CreateVP8Picture() {
@@ -1446,9 +1444,8 @@
 #undef CLAMP_Q
   }
 
-  if (!vaapi_wrapper_->SubmitBuffer(VAIQMatrixBufferType,
-                                    sizeof(VAIQMatrixBufferVP8),
-                                    &iq_matrix_buf))
+  if (!vaapi_wrapper_->SubmitBuffer(
+          VAIQMatrixBufferType, sizeof(VAIQMatrixBufferVP8), &iq_matrix_buf))
     return false;
 
   VAProbabilityDataBufferVP8 prob_buf;
@@ -1533,13 +1530,13 @@
     pic_param.loop_filter_level[i] = lf_level;
   }
 
-  static_assert(arraysize(lf_hdr.ref_frame_delta) ==
-                    arraysize(pic_param.loop_filter_deltas_ref_frame) &&
-                arraysize(lf_hdr.mb_mode_delta) ==
-                    arraysize(pic_param.loop_filter_deltas_mode) &&
-                arraysize(lf_hdr.ref_frame_delta) ==
-                    arraysize(lf_hdr.mb_mode_delta),
-                "loop filter deltas arrays size mismatch");
+  static_assert(
+      arraysize(lf_hdr.ref_frame_delta) ==
+              arraysize(pic_param.loop_filter_deltas_ref_frame) &&
+          arraysize(lf_hdr.mb_mode_delta) ==
+              arraysize(pic_param.loop_filter_deltas_mode) &&
+          arraysize(lf_hdr.ref_frame_delta) == arraysize(lf_hdr.mb_mode_delta),
+      "loop filter deltas arrays size mismatch");
   for (size_t i = 0; i < arraysize(lf_hdr.ref_frame_delta); ++i) {
     pic_param.loop_filter_deltas_ref_frame[i] = lf_hdr.ref_frame_delta[i];
     pic_param.loop_filter_deltas_mode[i] = lf_hdr.mb_mode_delta[i];
@@ -1588,8 +1585,7 @@
 
   void* non_const_ptr = const_cast<uint8_t*>(frame_hdr->data);
   if (!vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType,
-                                    frame_hdr->frame_size,
-                                    non_const_ptr))
+                                    frame_hdr->frame_size, non_const_ptr))
     return false;
 
   scoped_refptr<VaapiDecodeSurface> dec_surface =
@@ -1777,4 +1773,4 @@
   return VaapiWrapper::GetSupportedDecodeProfiles();
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vaapi_video_decode_accelerator.h b/media/gpu/vaapi_video_decode_accelerator.h
similarity index 94%
rename from content/common/gpu/media/vaapi_video_decode_accelerator.h
rename to media/gpu/vaapi_video_decode_accelerator.h
index 9f2b66b..f2db989 100644
--- a/content/common/gpu/media/vaapi_video_decode_accelerator.h
+++ b/media/gpu/vaapi_video_decode_accelerator.h
@@ -5,8 +5,8 @@
 // This file contains an implementation of VideoDecoderAccelerator
 // that utilizes hardware video decoder present on Intel CPUs.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -26,11 +26,11 @@
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
 #include "base/threading/thread.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
-#include "content/common/gpu/media/shared_memory_region.h"
-#include "content/common/gpu/media/vaapi_wrapper.h"
 #include "media/base/bitstream_buffer.h"
+#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/shared_memory_region.h"
+#include "media/gpu/vaapi_wrapper.h"
 #include "media/video/picture.h"
 #include "media/video/video_decode_accelerator.h"
 
@@ -38,7 +38,7 @@
 class GLImage;
 }
 
-namespace content {
+namespace media {
 
 class AcceleratedVideoDecoder;
 class VaapiPicture;
@@ -51,7 +51,7 @@
 // ChildThread.  A few methods on it are called on the decoder thread which is
 // stopped during |this->Destroy()|, so any tasks posted to the decoder thread
 // can assume |*this| is still alive.  See |weak_this_| below for more details.
-class CONTENT_EXPORT VaapiVideoDecodeAccelerator
+class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
     : public media::VideoDecodeAccelerator {
  public:
   class VaapiDecodeSurface;
@@ -77,7 +77,7 @@
       override;
 
   static media::VideoDecodeAccelerator::SupportedProfiles
-      GetSupportedProfiles();
+  GetSupportedProfiles();
 
  private:
   class VaapiH264Accelerator;
@@ -215,7 +215,7 @@
   };
 
   // Queue for incoming input buffers.
-  typedef std::queue<linked_ptr<InputBuffer> > InputBuffers;
+  typedef std::queue<linked_ptr<InputBuffer>> InputBuffers;
   InputBuffers input_buffers_;
   // Signalled when input buffers are queued onto the input_buffers_ queue.
   base::ConditionVariable input_ready_;
@@ -317,6 +317,6 @@
   DISALLOW_COPY_AND_ASSIGN(VaapiVideoDecodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/vaapi_video_encode_accelerator.cc b/media/gpu/vaapi_video_encode_accelerator.cc
similarity index 93%
rename from content/common/gpu/media/vaapi_video_encode_accelerator.cc
rename to media/gpu/vaapi_video_encode_accelerator.cc
index dfa7f9f..5eea88e 100644
--- a/content/common/gpu/media/vaapi_video_encode_accelerator.cc
+++ b/media/gpu/vaapi_video_encode_accelerator.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vaapi_video_encode_accelerator.h"
+#include "media/gpu/vaapi_video_encode_accelerator.h"
 
 #include <string.h>
 
@@ -14,22 +14,22 @@
 #include "base/macros.h"
 #include "base/metrics/histogram.h"
 #include "base/numerics/safe_conversions.h"
-#include "content/common/gpu/media/h264_dpb.h"
-#include "content/common/gpu/media/shared_memory_region.h"
 #include "media/base/bind_to_current_loop.h"
+#include "media/gpu/h264_dpb.h"
+#include "media/gpu/shared_memory_region.h"
 #include "third_party/libva/va/va_enc_h264.h"
 
 #define DVLOGF(level) DVLOG(level) << __FUNCTION__ << "(): "
 
-#define NOTIFY_ERROR(error, msg)                         \
-  do {                                                   \
-    SetState(kError);                                    \
-    LOG(ERROR) << msg;                                   \
-    LOG(ERROR) << "Calling NotifyError(" << error << ")";\
-    NotifyError(error);                                  \
+#define NOTIFY_ERROR(error, msg)                          \
+  do {                                                    \
+    SetState(kError);                                     \
+    LOG(ERROR) << msg;                                    \
+    LOG(ERROR) << "Calling NotifyError(" << error << ")"; \
+    NotifyError(error);                                   \
   } while (0)
 
-namespace content {
+namespace media {
 
 namespace {
 // Need 2 surfaces for each frame: one for input data and one for
@@ -74,10 +74,8 @@
 // UMA errors that the VaapiVideoEncodeAccelerator class reports.
 enum VAVEAEncoderFailure {
   VAAPI_ERROR = 0,
-  // UMA requires that max must be greater than 1.
-  VAVEA_ENCODER_FAILURES_MAX = 2,
+  VAVEA_ENCODER_FAILURES_MAX,
 };
-
 }
 
 // Round |value| up to |alignment|, which must be a power of 2.
@@ -88,10 +86,8 @@
 }
 
 static void ReportToUMA(VAVEAEncoderFailure failure) {
-  UMA_HISTOGRAM_ENUMERATION(
-      "Media.VAVEA.EncoderFailure",
-      failure,
-      VAVEA_ENCODER_FAILURES_MAX);
+  UMA_HISTOGRAM_ENUMERATION("Media.VAVEA.EncoderFailure", failure,
+                            VAVEA_ENCODER_FAILURES_MAX + 1);
 }
 
 struct VaapiVideoEncodeAccelerator::InputFrameRef {
@@ -178,15 +174,15 @@
 
   const SupportedProfiles& profiles = GetSupportedProfiles();
   auto profile = find_if(profiles.begin(), profiles.end(),
-      [output_profile](const SupportedProfile& profile) {
-        return profile.profile == output_profile;
-      });
+                         [output_profile](const SupportedProfile& profile) {
+                           return profile.profile == output_profile;
+                         });
   if (profile == profiles.end()) {
     DVLOGF(1) << "Unsupported output profile " << output_profile;
     return false;
   }
   if (input_visible_size.width() > profile->max_resolution.width() ||
-        input_visible_size.height() > profile->max_resolution.height()) {
+      input_visible_size.height() > profile->max_resolution.height()) {
     DVLOGF(1) << "Input size too big: " << input_visible_size.ToString()
               << ", max supported size: " << profile->max_resolution.ToString();
     return false;
@@ -375,8 +371,7 @@
 #undef SPS_TO_SP
 
   if (!vaapi_wrapper_->SubmitBuffer(VAEncSequenceParameterBufferType,
-                                    sizeof(seq_param),
-                                    &seq_param))
+                                    sizeof(seq_param), &seq_param))
     return false;
 
   VAEncPictureParameterBufferH264 pic_param;
@@ -414,8 +409,7 @@
 #undef PPS_TO_PP_PF
 
   if (!vaapi_wrapper_->SubmitBuffer(VAEncPictureParameterBufferType,
-                                    sizeof(pic_param),
-                                    &pic_param))
+                                    sizeof(pic_param), &pic_param))
     return false;
 
   VAEncSliceParameterBufferH264 slice_param;
@@ -446,8 +440,7 @@
   }
 
   if (!vaapi_wrapper_->SubmitBuffer(VAEncSliceParameterBufferType,
-                                    sizeof(slice_param),
-                                    &slice_param))
+                                    sizeof(slice_param), &slice_param))
     return false;
 
   VAEncMiscParameterRateControl rate_control_param;
@@ -459,8 +452,7 @@
   rate_control_param.rc_flags.bits.disable_frame_skip = true;
 
   if (!vaapi_wrapper_->SubmitVAEncMiscParamBuffer(
-          VAEncMiscParameterTypeRateControl,
-          sizeof(rate_control_param),
+          VAEncMiscParameterTypeRateControl, sizeof(rate_control_param),
           &rate_control_param))
     return false;
 
@@ -468,8 +460,7 @@
   memset(&framerate_param, 0, sizeof(framerate_param));
   framerate_param.framerate = framerate_;
   if (!vaapi_wrapper_->SubmitVAEncMiscParamBuffer(
-          VAEncMiscParameterTypeFrameRate,
-          sizeof(framerate_param),
+          VAEncMiscParameterTypeFrameRate, sizeof(framerate_param),
           &framerate_param))
     return false;
 
@@ -477,9 +468,8 @@
   memset(&hrd_param, 0, sizeof(hrd_param));
   hrd_param.buffer_size = cpb_size_;
   hrd_param.initial_buffer_fullness = cpb_size_ / 2;
-  if (!vaapi_wrapper_->SubmitVAEncMiscParamBuffer(VAEncMiscParameterTypeHRD,
-                                                  sizeof(hrd_param),
-                                                  &hrd_param))
+  if (!vaapi_wrapper_->SubmitVAEncMiscParamBuffer(
+          VAEncMiscParameterTypeHRD, sizeof(hrd_param), &hrd_param))
     return false;
 
   return true;
@@ -497,8 +487,7 @@
   par_buffer.bit_length = packed_sps_.BytesInBuffer() * 8;
 
   if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderParameterBufferType,
-                                    sizeof(par_buffer),
-                                    &par_buffer))
+                                    sizeof(par_buffer), &par_buffer))
     return false;
 
   if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderDataBufferType,
@@ -512,8 +501,7 @@
   par_buffer.bit_length = packed_pps_.BytesInBuffer() * 8;
 
   if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderParameterBufferType,
-                                    sizeof(par_buffer),
-                                    &par_buffer))
+                                    sizeof(par_buffer), &par_buffer))
     return false;
 
   if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderDataBufferType,
@@ -764,9 +752,8 @@
   // Early-exit encoder tasks if they are running and join the thread.
   if (encoder_thread_.IsRunning()) {
     encoder_thread_.message_loop()->PostTask(
-        FROM_HERE,
-        base::Bind(&VaapiVideoEncodeAccelerator::DestroyTask,
-                   base::Unretained(this)));
+        FROM_HERE, base::Bind(&VaapiVideoEncodeAccelerator::DestroyTask,
+                              base::Unretained(this)));
     encoder_thread_.Stop();
   }
 
@@ -853,10 +840,12 @@
   current_sps_.cpb_size_scale = kCPBSizeScale;
   current_sps_.bit_rate_value_minus1[0] =
       (bitrate_ >>
-          (kBitRateScale + media::H264SPS::kBitRateScaleConstantTerm)) - 1;
+       (kBitRateScale + media::H264SPS::kBitRateScaleConstantTerm)) -
+      1;
   current_sps_.cpb_size_value_minus1[0] =
       (cpb_size_ >>
-          (kCPBSizeScale + media::H264SPS::kCPBSizeScaleConstantTerm)) - 1;
+       (kCPBSizeScale + media::H264SPS::kCPBSizeScaleConstantTerm)) -
+      1;
   current_sps_.cbr_flag[0] = true;
   current_sps_.initial_cpb_removal_delay_length_minus_1 =
       media::H264SPS::kDefaultInitialCPBRemovalDelayLength - 1;
@@ -960,13 +949,13 @@
       packed_sps_.AppendBool(current_sps_.low_delay_hrd_flag);
 
     packed_sps_.AppendBool(false);  // pic_struct_present_flag
-    packed_sps_.AppendBool(true);  // bitstream_restriction_flag
+    packed_sps_.AppendBool(true);   // bitstream_restriction_flag
 
     packed_sps_.AppendBool(false);  // motion_vectors_over_pic_boundaries_flag
-    packed_sps_.AppendUE(2);  // max_bytes_per_pic_denom
-    packed_sps_.AppendUE(1);  // max_bits_per_mb_denom
-    packed_sps_.AppendUE(16);  // log2_max_mv_length_horizontal
-    packed_sps_.AppendUE(16);  // log2_max_mv_length_vertical
+    packed_sps_.AppendUE(2);        // max_bytes_per_pic_denom
+    packed_sps_.AppendUE(1);        // max_bits_per_mb_denom
+    packed_sps_.AppendUE(16);       // log2_max_mv_length_horizontal
+    packed_sps_.AppendUE(16);       // log2_max_mv_length_vertical
 
     // Explicitly set max_num_reorder_frames to 0 to allow the decoder to
     // output pictures early.
@@ -1065,10 +1054,8 @@
 }
 
 VaapiVideoEncodeAccelerator::EncodeJob::EncodeJob()
-    : coded_buffer(VA_INVALID_ID), keyframe(false) {
-}
+    : coded_buffer(VA_INVALID_ID), keyframe(false) {}
 
-VaapiVideoEncodeAccelerator::EncodeJob::~EncodeJob() {
-}
+VaapiVideoEncodeAccelerator::EncodeJob::~EncodeJob() {}
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vaapi_video_encode_accelerator.h b/media/gpu/vaapi_video_encode_accelerator.h
similarity index 91%
rename from content/common/gpu/media/vaapi_video_encode_accelerator.h
rename to media/gpu/vaapi_video_encode_accelerator.h
index 8a4c88c..1d5fb9d 100644
--- a/content/common/gpu/media/vaapi_video_encode_accelerator.h
+++ b/media/gpu/vaapi_video_encode_accelerator.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_ENCODE_ACCELERATOR_H_
-#define CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_ENCODE_ACCELERATOR_H_
+#ifndef MEDIA_GPU_VAAPI_VIDEO_ENCODE_ACCELERATOR_H_
+#define MEDIA_GPU_VAAPI_VIDEO_ENCODE_ACCELERATOR_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -15,19 +15,19 @@
 #include "base/macros.h"
 #include "base/memory/linked_ptr.h"
 #include "base/threading/thread.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/h264_dpb.h"
-#include "content/common/gpu/media/va_surface.h"
-#include "content/common/gpu/media/vaapi_wrapper.h"
 #include "media/filters/h264_bitstream_buffer.h"
+#include "media/gpu/h264_dpb.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/va_surface.h"
+#include "media/gpu/vaapi_wrapper.h"
 #include "media/video/video_encode_accelerator.h"
 
-namespace content {
+namespace media {
 
 // A VideoEncodeAccelerator implementation that uses VA-API
 // (http://www.freedesktop.org/wiki/Software/vaapi) for HW-accelerated
 // video encode.
-class CONTENT_EXPORT VaapiVideoEncodeAccelerator
+class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
     : public media::VideoEncodeAccelerator {
  public:
   VaapiVideoEncodeAccelerator();
@@ -50,7 +50,7 @@
 
  private:
   // Reference picture list.
-  typedef std::list<scoped_refptr<VASurface> > RefPicList;
+  typedef std::list<scoped_refptr<VASurface>> RefPicList;
 
   // Encode job for one frame. Created when an input frame is awaiting and
   // enough resources are available to proceed. Once the job is prepared and
@@ -232,13 +232,13 @@
   VASurface::ReleaseCB va_surface_release_cb_;
 
   // VideoFrames passed from the client, waiting to be encoded.
-  std::queue<linked_ptr<InputFrameRef> > encoder_input_queue_;
+  std::queue<linked_ptr<InputFrameRef>> encoder_input_queue_;
 
   // BitstreamBuffers mapped, ready to be filled.
-  std::queue<linked_ptr<BitstreamBufferRef> > available_bitstream_buffers_;
+  std::queue<linked_ptr<BitstreamBufferRef>> available_bitstream_buffers_;
 
   // Jobs submitted for encode, awaiting bitstream buffers to become available.
-  std::queue<linked_ptr<EncodeJob> > submitted_encode_jobs_;
+  std::queue<linked_ptr<EncodeJob>> submitted_encode_jobs_;
 
   // Encoder thread. All tasks are executed on it.
   base::Thread encoder_thread_;
@@ -262,6 +262,6 @@
   DISALLOW_COPY_AND_ASSIGN(VaapiVideoEncodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_ENCODE_ACCELERATOR_H_
+#endif  // MEDIA_GPU_VAAPI_VIDEO_ENCODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/vaapi_wrapper.cc b/media/gpu/vaapi_wrapper.cc
similarity index 87%
rename from content/common/gpu/media/vaapi_wrapper.cc
rename to media/gpu/vaapi_wrapper.cc
index ab4e176..c40f8bc 100644
--- a/content/common/gpu/media/vaapi_wrapper.cc
+++ b/media/gpu/vaapi_wrapper.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vaapi_wrapper.h"
+#include "media/gpu/vaapi_wrapper.h"
 
 #include <dlfcn.h>
 #include <string.h>
@@ -14,11 +14,14 @@
 #include "base/numerics/safe_conversions.h"
 #include "base/sys_info.h"
 #include "build/build_config.h"
+
 // Auto-generated for dlopen libva libraries
-#include "content/common/gpu/media/va_stubs.h"
-#include "content/common/gpu/media/vaapi_picture.h"
+#include "media/gpu/va_stubs.h"
+
+#include "media/gpu/vaapi_picture.h"
 #include "third_party/libyuv/include/libyuv.h"
 #include "ui/gl/gl_bindings.h"
+
 #if defined(USE_X11)
 #include "ui/gfx/x/x11_types.h"
 #elif defined(USE_OZONE)
@@ -28,34 +31,33 @@
 #include "ui/ozone/public/surface_factory_ozone.h"
 #endif  // USE_X11
 
-using content_common_gpu_media::kModuleVa;
+using media_gpu::kModuleVa;
 #if defined(USE_X11)
-using content_common_gpu_media::kModuleVa_x11;
+using media_gpu::kModuleVa_x11;
 #elif defined(USE_OZONE)
-using content_common_gpu_media::kModuleVa_drm;
+using media_gpu::kModuleVa_drm;
 #endif  // USE_X11
-using content_common_gpu_media::InitializeStubs;
-using content_common_gpu_media::StubPathMap;
+using media_gpu::InitializeStubs;
+using media_gpu::StubPathMap;
 
-#define LOG_VA_ERROR_AND_REPORT(va_error, err_msg)         \
-  do {                                                     \
-    LOG(ERROR) << err_msg                                  \
-             << " VA error: " << vaErrorStr(va_error);     \
-    report_error_to_uma_cb_.Run();                         \
+#define LOG_VA_ERROR_AND_REPORT(va_error, err_msg)                  \
+  do {                                                              \
+    LOG(ERROR) << err_msg << " VA error: " << vaErrorStr(va_error); \
+    report_error_to_uma_cb_.Run();                                  \
   } while (0)
 
-#define VA_LOG_ON_ERROR(va_error, err_msg)                 \
-  do {                                                     \
-    if ((va_error) != VA_STATUS_SUCCESS)                   \
-      LOG_VA_ERROR_AND_REPORT(va_error, err_msg);          \
+#define VA_LOG_ON_ERROR(va_error, err_msg)        \
+  do {                                            \
+    if ((va_error) != VA_STATUS_SUCCESS)          \
+      LOG_VA_ERROR_AND_REPORT(va_error, err_msg); \
   } while (0)
 
-#define VA_SUCCESS_OR_RETURN(va_error, err_msg, ret)       \
-  do {                                                     \
-    if ((va_error) != VA_STATUS_SUCCESS) {                 \
-      LOG_VA_ERROR_AND_REPORT(va_error, err_msg);          \
-      return (ret);                                        \
-    }                                                      \
+#define VA_SUCCESS_OR_RETURN(va_error, err_msg, ret) \
+  do {                                               \
+    if ((va_error) != VA_STATUS_SUCCESS) {           \
+      LOG_VA_ERROR_AND_REPORT(va_error, err_msg);    \
+      return (ret);                                  \
+    }                                                \
   } while (0)
 
 #if defined(USE_OZONE)
@@ -88,7 +90,7 @@
 }  // namespace
 #endif
 
-namespace content {
+namespace media {
 
 // Maximum framerate of encoded profile. This value is an arbitary limit
 // and not taken from HW documentation.
@@ -134,13 +136,11 @@
     VaapiWrapper::CodecMode mode) {
   std::vector<VAConfigAttrib> required_attribs;
   required_attribs.insert(
-      required_attribs.end(),
-      kCommonVAConfigAttribs,
+      required_attribs.end(), kCommonVAConfigAttribs,
       kCommonVAConfigAttribs + arraysize(kCommonVAConfigAttribs));
   if (mode == VaapiWrapper::kEncode) {
     required_attribs.insert(
-        required_attribs.end(),
-        kEncodeVAConfigAttribs,
+        required_attribs.end(), kEncodeVAConfigAttribs,
         kEncodeVAConfigAttribs + arraysize(kEncodeVAConfigAttribs));
   }
   return required_attribs;
@@ -270,7 +270,7 @@
 void VaapiWrapper::TryToSetVADisplayAttributeToLocalGPU() {
   base::AutoLock auto_lock(*va_lock_);
   VADisplayAttribute item = {VADisplayAttribRenderMode,
-                             1,  // At least support '_LOCAL_OVERLAY'.
+                             1,   // At least support '_LOCAL_OVERLAY'.
                              -1,  // The maximum possible support 'ALL'.
                              VA_RENDER_MODE_LOCAL_GPU,
                              VA_DISPLAY_ATTRIB_SETTABLE};
@@ -281,8 +281,8 @@
 }
 
 // static
-VAProfile VaapiWrapper::ProfileToVAProfile(
-    media::VideoCodecProfile profile, CodecMode mode) {
+VAProfile VaapiWrapper::ProfileToVAProfile(media::VideoCodecProfile profile,
+                                           CodecMode mode) {
   VAProfile va_profile = VAProfileNone;
   for (size_t i = 0; i < arraysize(kProfileMap); ++i) {
     if (kProfileMap[i].profile == profile) {
@@ -298,7 +298,7 @@
     // try constrained baseline and hope this is what it actually is
     // (which in practice is true for a great majority of cases).
     if (profile_infos_.Get().IsProfileSupported(
-          mode, VAProfileH264ConstrainedBaseline)) {
+            mode, VAProfileH264ConstrainedBaseline)) {
       va_profile = VAProfileH264ConstrainedBaseline;
       DVLOG(1) << "Fall back to constrained baseline profile.";
     }
@@ -315,7 +315,7 @@
 
   std::vector<VAConfigAttrib> required_attribs = GetRequiredAttribs(mode);
   VAEntrypoint entrypoint =
-      (mode == kEncode ? VAEntrypointEncSlice: VAEntrypointVLD);
+      (mode == kEncode ? VAEntrypointEncSlice : VAEntrypointVLD);
 
   base::AutoLock auto_lock(*va_lock_);
   for (const auto& va_profile : va_profiles) {
@@ -324,9 +324,7 @@
     if (!AreAttribsSupported_Locked(va_profile, entrypoint, required_attribs))
       continue;
     ProfileInfo profile_info;
-    if (!GetMaxResolution_Locked(va_profile,
-                                 entrypoint,
-                                 required_attribs,
+    if (!GetMaxResolution_Locked(va_profile, entrypoint, required_attribs,
                                  &profile_info.max_resolution)) {
       LOG(ERROR) << "GetMaxResolution failed for va_profile " << va_profile
                  << " and entrypoint " << entrypoint;
@@ -380,8 +378,8 @@
       base::checked_cast<size_t>(max_profiles));
 
   int num_supported_profiles;
-  VAStatus va_res = vaQueryConfigProfiles(
-      va_display_, &supported_profiles[0], &num_supported_profiles);
+  VAStatus va_res = vaQueryConfigProfiles(va_display_, &supported_profiles[0],
+                                          &num_supported_profiles);
   VA_SUCCESS_OR_RETURN(va_res, "vaQueryConfigProfiles failed", false);
   if (num_supported_profiles < 0 || num_supported_profiles > max_profiles) {
     LOG(ERROR) << "vaQueryConfigProfiles returned: " << num_supported_profiles;
@@ -402,8 +400,7 @@
       base::checked_cast<size_t>(max_entrypoints));
 
   int num_supported_entrypoints;
-  VAStatus va_res = vaQueryConfigEntrypoints(va_display_,
-                                             va_profile,
+  VAStatus va_res = vaQueryConfigEntrypoints(va_display_, va_profile,
                                              &supported_entrypoints[0],
                                              &num_supported_entrypoints);
   VA_SUCCESS_OR_RETURN(va_res, "vaQueryConfigEntrypoints failed", false);
@@ -414,8 +411,7 @@
     return false;
   }
 
-  if (std::find(supported_entrypoints.begin(),
-                supported_entrypoints.end(),
+  if (std::find(supported_entrypoints.begin(), supported_entrypoints.end(),
                 entrypoint) == supported_entrypoints.end()) {
     DVLOG(1) << "Unsupported entrypoint";
     return false;
@@ -433,8 +429,8 @@
   for (size_t i = 0; i < required_attribs.size(); ++i)
     attribs[i].value = 0;
 
-  VAStatus va_res = vaGetConfigAttributes(
-      va_display_, va_profile, entrypoint, &attribs[0], attribs.size());
+  VAStatus va_res = vaGetConfigAttributes(va_display_, va_profile, entrypoint,
+                                          &attribs[0], attribs.size());
   VA_SUCCESS_OR_RETURN(va_res, "vaGetConfigAttributes failed", false);
 
   for (size_t i = 0; i < required_attribs.size(); ++i) {
@@ -456,21 +452,17 @@
     gfx::Size* resolution) {
   va_lock_->AssertAcquired();
   VAConfigID va_config_id;
-  VAStatus va_res = vaCreateConfig(
-      va_display_,
-      va_profile,
-      entrypoint,
-      &required_attribs[0],
-      required_attribs.size(),
-      &va_config_id);
+  VAStatus va_res =
+      vaCreateConfig(va_display_, va_profile, entrypoint, &required_attribs[0],
+                     required_attribs.size(), &va_config_id);
   VA_SUCCESS_OR_RETURN(va_res, "vaCreateConfig failed", false);
 
   // Calls vaQuerySurfaceAttributes twice. The first time is to get the number
   // of attributes to prepare the space and the second time is to get all
   // attributes.
   unsigned int num_attribs;
-  va_res = vaQuerySurfaceAttributes(
-      va_display_, va_config_id, nullptr, &num_attribs);
+  va_res = vaQuerySurfaceAttributes(va_display_, va_config_id, nullptr,
+                                    &num_attribs);
   VA_SUCCESS_OR_RETURN(va_res, "vaQuerySurfaceAttributes failed", false);
   if (!num_attribs)
     return false;
@@ -478,8 +470,8 @@
   std::vector<VASurfaceAttrib> attrib_list(
       base::checked_cast<size_t>(num_attribs));
 
-  va_res = vaQuerySurfaceAttributes(
-      va_display_, va_config_id, &attrib_list[0], &num_attribs);
+  va_res = vaQuerySurfaceAttributes(va_display_, va_config_id, &attrib_list[0],
+                                    &num_attribs);
   VA_SUCCESS_OR_RETURN(va_res, "vaQuerySurfaceAttributes failed", false);
 
   resolution->SetSize(0, 0);
@@ -501,15 +493,12 @@
   TryToSetVADisplayAttributeToLocalGPU();
 
   VAEntrypoint entrypoint =
-    (mode == kEncode ? VAEntrypointEncSlice : VAEntrypointVLD);
+      (mode == kEncode ? VAEntrypointEncSlice : VAEntrypointVLD);
   std::vector<VAConfigAttrib> required_attribs = GetRequiredAttribs(mode);
   base::AutoLock auto_lock(*va_lock_);
-  VAStatus va_res = vaCreateConfig(va_display_,
-                                   va_profile,
-                                   entrypoint,
-                                   &required_attribs[0],
-                                   required_attribs.size(),
-                                   &va_config_id_);
+  VAStatus va_res =
+      vaCreateConfig(va_display_, va_profile, entrypoint, &required_attribs[0],
+                     required_attribs.size(), &va_config_id_);
   VA_SUCCESS_OR_RETURN(va_res, "vaCreateConfig failed", false);
 
   return true;
@@ -558,10 +547,9 @@
   }
 
   // And create a context associated with them.
-  va_res = vaCreateContext(va_display_, va_config_id_,
-                           size.width(), size.height(), VA_PROGRESSIVE,
-                           &va_surface_ids_[0], va_surface_ids_.size(),
-                           &va_context_id_);
+  va_res = vaCreateContext(va_display_, va_config_id_, size.width(),
+                           size.height(), VA_PROGRESSIVE, &va_surface_ids_[0],
+                           va_surface_ids_.size(), &va_context_id_);
 
   VA_LOG_ON_ERROR(va_res, "vaCreateContext failed");
   if (va_res != VA_STATUS_SUCCESS) {
@@ -576,7 +564,7 @@
 
 void VaapiWrapper::DestroySurfaces() {
   base::AutoLock auto_lock(*va_lock_);
-  DVLOG(2) << "Destroying " << va_surface_ids_.size()  << " surfaces";
+  DVLOG(2) << "Destroying " << va_surface_ids_.size() << " surfaces";
 
   if (va_context_id_ != VA_INVALID_ID) {
     VAStatus va_res = vaDestroyContext(va_display_, va_context_id_);
@@ -711,9 +699,8 @@
   base::AutoLock auto_lock(*va_lock_);
 
   VABufferID buffer_id;
-  VAStatus va_res = vaCreateBuffer(va_display_, va_context_id_,
-                                   va_buffer_type, size,
-                                   1, buffer, &buffer_id);
+  VAStatus va_res = vaCreateBuffer(va_display_, va_context_id_, va_buffer_type,
+                                   size, 1, buffer, &buffer_id);
   VA_SUCCESS_OR_RETURN(va_res, "Failed to create a VA buffer", false);
 
   switch (va_buffer_type) {
@@ -738,13 +725,9 @@
   base::AutoLock auto_lock(*va_lock_);
 
   VABufferID buffer_id;
-  VAStatus va_res = vaCreateBuffer(va_display_,
-                                   va_context_id_,
-                                   VAEncMiscParameterBufferType,
-                                   sizeof(VAEncMiscParameterBuffer) + size,
-                                   1,
-                                   NULL,
-                                   &buffer_id);
+  VAStatus va_res = vaCreateBuffer(
+      va_display_, va_context_id_, VAEncMiscParameterBufferType,
+      sizeof(VAEncMiscParameterBuffer) + size, 1, NULL, &buffer_id);
   VA_SUCCESS_OR_RETURN(va_res, "Failed to create a VA buffer", false);
 
   void* data_ptr = NULL;
@@ -787,13 +770,9 @@
 
 bool VaapiWrapper::CreateCodedBuffer(size_t size, VABufferID* buffer_id) {
   base::AutoLock auto_lock(*va_lock_);
-  VAStatus va_res = vaCreateBuffer(va_display_,
-                                   va_context_id_,
-                                   VAEncCodedBufferType,
-                                   size,
-                                   1,
-                                   NULL,
-                                   buffer_id);
+  VAStatus va_res =
+      vaCreateBuffer(va_display_, va_context_id_, VAEncCodedBufferType, size, 1,
+                     NULL, buffer_id);
   VA_SUCCESS_OR_RETURN(va_res, "Failed to create a coded buffer", false);
 
   const auto is_new_entry = coded_buffers_.insert(*buffer_id).second;
@@ -805,8 +784,7 @@
   base::AutoLock auto_lock(*va_lock_);
 
   for (std::set<VABufferID>::const_iterator iter = coded_buffers_.begin();
-       iter != coded_buffers_.end();
-       ++iter) {
+       iter != coded_buffers_.end(); ++iter) {
     VAStatus va_res = vaDestroyBuffer(va_display_, *iter);
     VA_LOG_ON_ERROR(va_res, "vaDestroyBuffer failed");
   }
@@ -822,24 +800,20 @@
   DVLOG(4) << "Target VA surface " << va_surface_id;
 
   // Get ready to execute for given surface.
-  VAStatus va_res = vaBeginPicture(va_display_, va_context_id_,
-                                   va_surface_id);
+  VAStatus va_res = vaBeginPicture(va_display_, va_context_id_, va_surface_id);
   VA_SUCCESS_OR_RETURN(va_res, "vaBeginPicture failed", false);
 
   if (pending_va_bufs_.size() > 0) {
     // Commit parameter and slice buffers.
-    va_res = vaRenderPicture(va_display_,
-                             va_context_id_,
-                             &pending_va_bufs_[0],
+    va_res = vaRenderPicture(va_display_, va_context_id_, &pending_va_bufs_[0],
                              pending_va_bufs_.size());
     VA_SUCCESS_OR_RETURN(va_res, "vaRenderPicture for va_bufs failed", false);
   }
 
   if (pending_slice_bufs_.size() > 0) {
-    va_res = vaRenderPicture(va_display_,
-                             va_context_id_,
-                             &pending_slice_bufs_[0],
-                             pending_slice_bufs_.size());
+    va_res =
+        vaRenderPicture(va_display_, va_context_id_, &pending_slice_bufs_[0],
+                        pending_slice_bufs_.size());
     VA_SUCCESS_OR_RETURN(va_res, "vaRenderPicture for slices failed", false);
   }
 
@@ -867,12 +841,9 @@
   VA_SUCCESS_OR_RETURN(va_res, "Failed syncing surface", false);
 
   // Put the data into an X Pixmap.
-  va_res = vaPutSurface(va_display_,
-                        va_surface_id,
-                        x_pixmap,
-                        0, 0, dest_size.width(), dest_size.height(),
-                        0, 0, dest_size.width(), dest_size.height(),
-                        NULL, 0, 0);
+  va_res = vaPutSurface(va_display_, va_surface_id, x_pixmap, 0, 0,
+                        dest_size.width(), dest_size.height(), 0, 0,
+                        dest_size.width(), dest_size.height(), NULL, 0, 0);
   VA_SUCCESS_OR_RETURN(va_res, "Failed putting surface to pixmap", false);
   return true;
 }
@@ -1010,8 +981,8 @@
   VA_SUCCESS_OR_RETURN(va_res, "Failed syncing surface", false);
 
   VACodedBufferSegment* buffer_segment = NULL;
-  va_res = vaMapBuffer(
-      va_display_, buffer_id, reinterpret_cast<void**>(&buffer_segment));
+  va_res = vaMapBuffer(va_display_, buffer_id,
+                       reinterpret_cast<void**>(&buffer_segment));
   VA_SUCCESS_OR_RETURN(va_res, "vaMapBuffer failed", false);
   DCHECK(target_ptr);
 
@@ -1184,8 +1155,7 @@
   }
 }
 
-VaapiWrapper::LazyProfileInfos::~LazyProfileInfos() {
-}
+VaapiWrapper::LazyProfileInfos::~LazyProfileInfos() {}
 
 std::vector<VaapiWrapper::ProfileInfo>
 VaapiWrapper::LazyProfileInfos::GetSupportedProfileInfosForCodecMode(
@@ -1193,8 +1163,8 @@
   return supported_profiles_[mode];
 }
 
-bool VaapiWrapper::LazyProfileInfos::IsProfileSupported(
-    CodecMode mode, VAProfile va_profile) {
+bool VaapiWrapper::LazyProfileInfos::IsProfileSupported(CodecMode mode,
+                                                        VAProfile va_profile) {
   for (const auto& profile : supported_profiles_[mode]) {
     if (profile.va_profile == va_profile)
       return true;
@@ -1268,7 +1238,7 @@
 
 bool VaapiWrapper::VADisplayState::VAAPIVersionLessThan(int major, int minor) {
   return (major_version_ < major) ||
-      (major_version_ == major && minor_version_ < minor);
+         (major_version_ == major && minor_version_ < minor);
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vaapi_wrapper.h b/media/gpu/vaapi_wrapper.h
similarity index 95%
rename from content/common/gpu/media/vaapi_wrapper.h
rename to media/gpu/vaapi_wrapper.h
index 4394bc3..e5f84a4 100644
--- a/content/common/gpu/media/vaapi_wrapper.h
+++ b/media/gpu/vaapi_wrapper.h
@@ -7,8 +7,8 @@
 // and VaapiVideoEncodeAccelerator for encode, to interface
 // with libva (VA-API library for hardware video codec).
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_WRAPPER_H_
-#define CONTENT_COMMON_GPU_MEDIA_VAAPI_WRAPPER_H_
+#ifndef MEDIA_GPU_VAAPI_WRAPPER_H_
+#define MEDIA_GPU_VAAPI_WRAPPER_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -21,16 +21,17 @@
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/synchronization/lock.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/media/va_surface.h"
 #include "media/base/video_decoder_config.h"
 #include "media/base/video_frame.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/va_surface.h"
 #include "media/video/jpeg_decode_accelerator.h"
 #include "media/video/video_decode_accelerator.h"
 #include "media/video/video_encode_accelerator.h"
 #include "third_party/libva/va/va.h"
 #include "third_party/libva/va/va_vpp.h"
 #include "ui/gfx/geometry/size.h"
+
 #if defined(USE_X11)
 #include "third_party/libva/va/va_x11.h"
 #endif  // USE_X11
@@ -41,7 +42,7 @@
 }
 #endif
 
-namespace content {
+namespace media {
 
 // This class handles VA-API calls and ensures proper locking of VA-API calls
 // to libva, the userspace shim to the HW codec driver. libva is not
@@ -53,7 +54,7 @@
 // It is also responsible for managing and freeing VABuffers (not VASurfaces),
 // which are used to queue parameters and slice data to the HW codec,
 // as well as underlying memory for VASurfaces themselves.
-class CONTENT_EXPORT VaapiWrapper
+class MEDIA_GPU_EXPORT VaapiWrapper
     : public base::RefCountedThreadSafe<VaapiWrapper> {
  public:
   enum CodecMode {
@@ -81,11 +82,11 @@
 
   // Return the supported video encode profiles.
   static media::VideoEncodeAccelerator::SupportedProfiles
-      GetSupportedEncodeProfiles();
+  GetSupportedEncodeProfiles();
 
   // Return the supported video decode profiles.
   static media::VideoDecodeAccelerator::SupportedProfiles
-      GetSupportedDecodeProfiles();
+  GetSupportedDecodeProfiles();
 
   // Return true when JPEG decode is supported.
   static bool IsJpegDecodeSupported();
@@ -309,11 +310,10 @@
   // Get maximum resolution for |va_profile| and |entrypoint| with
   // |required_attribs|. If return value is true, |resolution| is the maximum
   // resolution. |va_lock_| must be held on entry.
-  bool GetMaxResolution_Locked(
-      VAProfile va_profile,
-      VAEntrypoint entrypoint,
-      std::vector<VAConfigAttrib>& required_attribs,
-      gfx::Size* resolution);
+  bool GetMaxResolution_Locked(VAProfile va_profile,
+                               VAEntrypoint entrypoint,
+                               std::vector<VAConfigAttrib>& required_attribs,
+                               gfx::Size* resolution);
 
   // Destroys a |va_surface| created using CreateUnownedSurface.
   void DestroyUnownedSurface(VASurfaceID va_surface_id);
@@ -393,6 +393,6 @@
   DISALLOW_COPY_AND_ASSIGN(VaapiWrapper);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VAAPI_WRAPPER_H_
+#endif  // MEDIA_GPU_VAAPI_WRAPPER_H_
diff --git a/content/common/gpu/media/video_accelerator_unittest_helpers.h b/media/gpu/video_accelerator_unittest_helpers.h
similarity index 86%
rename from content/common/gpu/media/video_accelerator_unittest_helpers.h
rename to media/gpu/video_accelerator_unittest_helpers.h
index 61e002d..266d2bd 100644
--- a/content/common/gpu/media/video_accelerator_unittest_helpers.h
+++ b/media/gpu/video_accelerator_unittest_helpers.h
@@ -4,15 +4,15 @@
 //
 // This file contains helper classes for video accelerator unittests.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VIDEO_ACCELERATOR_UNITTEST_HELPERS_H_
-#define CONTENT_COMMON_GPU_MEDIA_VIDEO_ACCELERATOR_UNITTEST_HELPERS_H_
+#ifndef MEDIA_GPU_VIDEO_ACCELERATOR_UNITTEST_HELPERS_H_
+#define MEDIA_GPU_VIDEO_ACCELERATOR_UNITTEST_HELPERS_H_
 
 #include <queue>
 
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
 
-namespace content {
+namespace media {
 
 // Helper class allowing one thread to wait on a notification from another.
 // If notifications come in faster than they are Wait()'d for, they are
@@ -58,6 +58,6 @@
   return ret;
 }
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VIDEO_ACCELERATOR_UNITTEST_HELPERS_H_
+#endif  // MEDIA_GPU_VIDEO_ACCELERATOR_UNITTEST_HELPERS_H_
diff --git a/content/common/gpu/media/video_decode_accelerator_unittest.cc b/media/gpu/video_decode_accelerator_unittest.cc
similarity index 87%
rename from content/common/gpu/media/video_decode_accelerator_unittest.cc
rename to media/gpu/video_decode_accelerator_unittest.cc
index 0d9e56a..e9c2d04 100644
--- a/content/common/gpu/media/video_decode_accelerator_unittest.cc
+++ b/media/gpu/video_decode_accelerator_unittest.cc
@@ -48,28 +48,28 @@
 #include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
 #include "build/build_config.h"
-#include "content/common/gpu/media/fake_video_decode_accelerator.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
-#include "content/common/gpu/media/rendering_helper.h"
-#include "content/common/gpu/media/video_accelerator_unittest_helpers.h"
 #include "gpu/command_buffer/service/gpu_preferences.h"
 #include "media/filters/h264_parser.h"
+#include "media/gpu/fake_video_decode_accelerator.h"
+#include "media/gpu/gpu_video_decode_accelerator_factory_impl.h"
+#include "media/gpu/rendering_helper.h"
+#include "media/gpu/video_accelerator_unittest_helpers.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "ui/gfx/codec/png_codec.h"
 #include "ui/gl/gl_image.h"
 
 #if defined(OS_WIN)
 #include "base/win/windows_version.h"
-#include "content/common/gpu/media/dxva_video_decode_accelerator_win.h"
+#include "media/gpu/dxva_video_decode_accelerator_win.h"
 #elif defined(OS_CHROMEOS)
 #if defined(USE_V4L2_CODEC)
-#include "content/common/gpu/media/v4l2_device.h"
-#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
-#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
+#include "media/gpu/v4l2_device.h"
+#include "media/gpu/v4l2_slice_video_decode_accelerator.h"
+#include "media/gpu/v4l2_video_decode_accelerator.h"
 #endif
 #if defined(ARCH_CPU_X86_FAMILY)
-#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
-#include "content/common/gpu/media/vaapi_wrapper.h"
+#include "media/gpu/vaapi_video_decode_accelerator.h"
+#include "media/gpu/vaapi_wrapper.h"
 #endif  // defined(ARCH_CPU_X86_FAMILY)
 #else
 #error The VideoAccelerator tests are not supported on this platform.
@@ -84,7 +84,7 @@
 
 using media::VideoDecodeAccelerator;
 
-namespace content {
+namespace media {
 namespace {
 
 using base::MakeTuple;
@@ -166,8 +166,7 @@
         min_fps_render(-1),
         min_fps_no_render(-1),
         profile(media::VIDEO_CODEC_PROFILE_UNKNOWN),
-        reset_after_frame_num(END_OF_STREAM_RESET) {
-  }
+        reset_after_frame_num(END_OF_STREAM_RESET) {}
 
   base::FilePath::StringType file_name;
   int width;
@@ -192,27 +191,26 @@
   filepath = filepath.AddExtension(FILE_PATH_LITERAL(".md5"));
   std::string all_md5s;
   base::ReadFileToString(filepath, &all_md5s);
-  *md5_strings = base::SplitString(
-      all_md5s, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  *md5_strings = base::SplitString(all_md5s, "\n", base::TRIM_WHITESPACE,
+                                   base::SPLIT_WANT_ALL);
   // Check these are legitimate MD5s.
   for (const std::string& md5_string : *md5_strings) {
-      // Ignore the empty string added by SplitString
-      if (!md5_string.length())
-        continue;
-      // Ignore comments
-      if (md5_string.at(0) == '#')
-        continue;
+    // Ignore the empty string added by SplitString
+    if (!md5_string.length())
+      continue;
+    // Ignore comments
+    if (md5_string.at(0) == '#')
+      continue;
 
-      LOG_ASSERT(static_cast<int>(md5_string.length()) ==
-               kMD5StringLength) << md5_string;
-      bool hex_only = std::count_if(md5_string.begin(),
-                                    md5_string.end(), isxdigit) ==
-                                    kMD5StringLength;
-      LOG_ASSERT(hex_only) << md5_string;
+    LOG_ASSERT(static_cast<int>(md5_string.length()) == kMD5StringLength)
+        << md5_string;
+    bool hex_only = std::count_if(md5_string.begin(), md5_string.end(),
+                                  isxdigit) == kMD5StringLength;
+    LOG_ASSERT(hex_only) << md5_string;
   }
   LOG_ASSERT(md5_strings->size() >= 1U) << "  MD5 checksum file ("
-                                    << filepath.MaybeAsASCII()
-                                    << ") missing or empty.";
+                                        << filepath.MaybeAsASCII()
+                                        << ") missing or empty.";
 }
 
 // State of the GLRenderingVDAClient below.  Order matters here as the test
@@ -465,8 +463,8 @@
   std::string GetBytesForNextFragment(size_t start_pos, size_t* end_pos);
   // Helpers for GetBytesForNextFragment above.
   void GetBytesForNextNALU(size_t start_pos, size_t* end_pos);  // For h.264.
-  std::string GetBytesForNextFrame(
-      size_t start_pos, size_t* end_pos);  // For VP8/9.
+  std::string GetBytesForNextFrame(size_t start_pos,
+                                   size_t* end_pos);  // For VP8/9.
 
   // Request decode of the next fragment in the encoded data.
   void DecodeNextFragment();
@@ -656,8 +654,8 @@
   for (uint32_t i = 0; i < requested_num_of_buffers; ++i) {
     uint32_t texture_id;
     base::WaitableEvent done(false, false);
-    rendering_helper_->CreateTexture(
-        texture_target_, &texture_id, dimensions, &done);
+    rendering_helper_->CreateTexture(texture_target_, &texture_id, dimensions,
+                                     &done);
     done.Wait();
 
     scoped_refptr<TextureRef> texture_ref;
@@ -852,8 +850,7 @@
   output->WriteAtCurrentPos(s.data(), s.length());
   base::TimeTicks t0 = initialize_done_ticks_;
   for (size_t i = 0; i < frame_delivery_times_.size(); ++i) {
-    s = base::StringPrintf("frame %04" PRIuS ": %" PRId64 " us\n",
-                           i,
+    s = base::StringPrintf("frame %04" PRIuS ": %" PRId64 " us\n", i,
                            (frame_delivery_times_[i] - t0).InMicroseconds());
     t0 = frame_delivery_times_[i];
     output->WriteAtCurrentPos(s.data(), s.length());
@@ -861,8 +858,8 @@
 }
 
 static bool LookingAtNAL(const std::string& encoded, size_t pos) {
-  return encoded[pos] == 0 && encoded[pos + 1] == 0 &&
-      encoded[pos + 2] == 0 && encoded[pos + 3] == 1;
+  return encoded[pos] == 0 && encoded[pos + 1] == 0 && encoded[pos + 2] == 0 &&
+         encoded[pos + 3] == 1;
 }
 
 void GLRenderingVDAClient::SetState(ClientState new_state) {
@@ -902,12 +899,12 @@
     SetState(static_cast<ClientState>(i));
 }
 
-std::string GLRenderingVDAClient::GetBytesForFirstFragment(
-    size_t start_pos, size_t* end_pos) {
+std::string GLRenderingVDAClient::GetBytesForFirstFragment(size_t start_pos,
+                                                           size_t* end_pos) {
   if (profile_ < media::H264PROFILE_MAX) {
     *end_pos = start_pos;
     while (*end_pos + 4 < encoded_data_.size()) {
-      if ((encoded_data_[*end_pos + 4] & 0x1f) == 0x7) // SPS start frame
+      if ((encoded_data_[*end_pos + 4] & 0x1f) == 0x7)  // SPS start frame
         return GetBytesForNextFragment(*end_pos, end_pos);
       GetBytesForNextNALU(*end_pos, end_pos);
       num_skipped_fragments_++;
@@ -919,8 +916,8 @@
   return GetBytesForNextFragment(start_pos, end_pos);
 }
 
-std::string GLRenderingVDAClient::GetBytesForNextFragment(
-    size_t start_pos, size_t* end_pos) {
+std::string GLRenderingVDAClient::GetBytesForNextFragment(size_t start_pos,
+                                                          size_t* end_pos) {
   if (profile_ < media::H264PROFILE_MAX) {
     *end_pos = start_pos;
     GetBytesForNextNALU(*end_pos, end_pos);
@@ -933,8 +930,8 @@
   return GetBytesForNextFrame(start_pos, end_pos);
 }
 
-void GLRenderingVDAClient::GetBytesForNextNALU(
-    size_t start_pos, size_t* end_pos) {
+void GLRenderingVDAClient::GetBytesForNextNALU(size_t start_pos,
+                                               size_t* end_pos) {
   *end_pos = start_pos;
   if (*end_pos + 4 > encoded_data_.size())
     return;
@@ -948,8 +945,8 @@
     *end_pos = encoded_data_.size();
 }
 
-std::string GLRenderingVDAClient::GetBytesForNextFrame(
-    size_t start_pos, size_t* end_pos) {
+std::string GLRenderingVDAClient::GetBytesForNextFrame(size_t start_pos,
+                                                       size_t* end_pos) {
   // Helpful description: http://wiki.multimedia.cx/index.php?title=IVF
   std::string bytes;
   if (start_pos == 0)
@@ -966,8 +963,7 @@
 static bool FragmentHasConfigInfo(const uint8_t* data,
                                   size_t size,
                                   media::VideoCodecProfile profile) {
-  if (profile >= media::H264PROFILE_MIN &&
-      profile <= media::H264PROFILE_MAX) {
+  if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) {
     media::H264Parser parser;
     parser.SetStream(data, size);
     media::H264NALU nalu;
@@ -1020,11 +1016,11 @@
   LOG_ASSERT(shm.CreateAndMapAnonymous(next_fragment_size));
   memcpy(shm.memory(), next_fragment_bytes.data(), next_fragment_size);
   base::SharedMemoryHandle dup_handle;
-  bool result = shm.ShareToProcess(base::GetCurrentProcessHandle(),
-      &dup_handle);
+  bool result =
+      shm.ShareToProcess(base::GetCurrentProcessHandle(), &dup_handle);
   LOG_ASSERT(result);
-  media::BitstreamBuffer bitstream_buffer(
-      next_bitstream_buffer_id_, dup_handle, next_fragment_size);
+  media::BitstreamBuffer bitstream_buffer(next_bitstream_buffer_id_, dup_handle,
+                                          next_fragment_size);
   decode_start_time_[next_bitstream_buffer_id_] = base::TimeTicks::Now();
   // Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
   next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF;
@@ -1083,10 +1079,9 @@
   // Update the parameters of |test_video_files| according to
   // |num_concurrent_decoders| and |reset_point|. Ex: the expected number of
   // frames should be adjusted if decoder is reset in the middle of the stream.
-  void UpdateTestVideoFileParams(
-      size_t num_concurrent_decoders,
-      int reset_point,
-      std::vector<TestVideoFile*>* test_video_files);
+  void UpdateTestVideoFileParams(size_t num_concurrent_decoders,
+                                 int reset_point,
+                                 std::vector<TestVideoFile*>* test_video_files);
 
   void InitializeRenderingHelper(const RenderingHelperParams& helper_params);
   void CreateAndStartDecoder(GLRenderingVDAClient* client,
@@ -1106,8 +1101,7 @@
   DISALLOW_COPY_AND_ASSIGN(VideoDecodeAcceleratorTest);
 };
 
-VideoDecodeAcceleratorTest::VideoDecodeAcceleratorTest() {
-}
+VideoDecodeAcceleratorTest::VideoDecodeAcceleratorTest() {}
 
 void VideoDecodeAcceleratorTest::SetUp() {
   ParseAndReadTestVideoData(g_test_video_data, &test_video_files_);
@@ -1130,14 +1124,14 @@
 void VideoDecodeAcceleratorTest::ParseAndReadTestVideoData(
     base::FilePath::StringType data,
     std::vector<TestVideoFile*>* test_video_files) {
-  std::vector<base::FilePath::StringType> entries = base::SplitString(
-      data, base::FilePath::StringType(1, ';'),
-      base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  std::vector<base::FilePath::StringType> entries =
+      base::SplitString(data, base::FilePath::StringType(1, ';'),
+                        base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   LOG_ASSERT(entries.size() >= 1U) << data;
   for (size_t index = 0; index < entries.size(); ++index) {
-    std::vector<base::FilePath::StringType> fields = base::SplitString(
-        entries[index], base::FilePath::StringType(1, ':'),
-        base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+    std::vector<base::FilePath::StringType> fields =
+        base::SplitString(entries[index], base::FilePath::StringType(1, ':'),
+                          base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
     LOG_ASSERT(fields.size() >= 1U) << entries[index];
     LOG_ASSERT(fields.size() <= 8U) << entries[index];
     TestVideoFile* video_file = new TestVideoFile(fields[0]);
@@ -1250,8 +1244,7 @@
 class VideoDecodeAcceleratorParamTest
     : public VideoDecodeAcceleratorTest,
       public ::testing::WithParamInterface<
-        base::Tuple<int, int, int, ResetPoint, ClientState, bool, bool> > {
-};
+          base::Tuple<int, int, int, ResetPoint, ClientState, bool, bool>> {};
 
 // Wait for |note| to report a state and if it's not |expected_state| then
 // assert |client| has deleted its decoder.
@@ -1260,10 +1253,11 @@
     GLRenderingVDAClient* client,
     ClientState expected_state) {
   ClientState state = note->Wait();
-  if (state == expected_state) return;
+  if (state == expected_state)
+    return;
   ASSERT_TRUE(client->decoder_deleted())
-      << "Decoder not deleted but Wait() returned " << state
-      << ", instead of " << expected_state;
+      << "Decoder not deleted but Wait() returned " << state << ", instead of "
+      << expected_state;
 }
 
 // We assert a minimal number of concurrent decoders we expect to succeed.
@@ -1288,14 +1282,14 @@
   if (g_num_play_throughs > 0)
     num_play_throughs = g_num_play_throughs;
 
-  UpdateTestVideoFileParams(
-      num_concurrent_decoders, reset_point, &test_video_files_);
+  UpdateTestVideoFileParams(num_concurrent_decoders, reset_point,
+                            &test_video_files_);
 
   // Suppress GL rendering for all tests when the "--rendering_fps" is 0.
   const bool suppress_rendering = g_rendering_fps == 0;
 
-  std::vector<ClientStateNotification<ClientState>*>
-      notes(num_concurrent_decoders, NULL);
+  std::vector<ClientStateNotification<ClientState>*> notes(
+      num_concurrent_decoders, NULL);
   std::vector<GLRenderingVDAClient*> clients(num_concurrent_decoders, NULL);
 
   RenderingHelperParams helper_params;
@@ -1323,23 +1317,13 @@
       delay_after_frame_num = video_file->num_frames - kMaxFramesToDelayReuse;
     }
 
-    GLRenderingVDAClient* client =
-        new GLRenderingVDAClient(index,
-                                 &rendering_helper_,
-                                 note,
-                                 video_file->data_str,
-                                 num_in_flight_decodes,
-                                 num_play_throughs,
-                                 video_file->reset_after_frame_num,
-                                 delete_decoder_state,
-                                 video_file->width,
-                                 video_file->height,
-                                 video_file->profile,
-                                 g_fake_decoder,
-                                 suppress_rendering,
-                                 delay_after_frame_num,
-                                 0,
-                                 render_as_thumbnails);
+    GLRenderingVDAClient* client = new GLRenderingVDAClient(
+        index, &rendering_helper_, note, video_file->data_str,
+        num_in_flight_decodes, num_play_throughs,
+        video_file->reset_after_frame_num, delete_decoder_state,
+        video_file->width, video_file->height, video_file->profile,
+        g_fake_decoder, suppress_rendering, delay_after_frame_num, 0,
+        render_as_thumbnails);
 
     clients[index] = client;
     helper_params.window_sizes.push_back(
@@ -1394,8 +1378,9 @@
         AssertWaitForStateOrDeleted(note, clients[i], CS_DESTROYED));
   }
   // Finally assert that decoding went as expected.
-  for (size_t i = 0; i < num_concurrent_decoders &&
-           !skip_performance_and_correctness_checks; ++i) {
+  for (size_t i = 0;
+       i < num_concurrent_decoders && !skip_performance_and_correctness_checks;
+       ++i) {
     // We can only make performance/correctness assertions if the decoder was
     // allowed to finish.
     if (delete_decoder_state < CS_FLUSHED)
@@ -1412,14 +1397,14 @@
     }
     if (reset_point == END_OF_STREAM_RESET) {
       EXPECT_EQ(video_file->num_fragments, client->num_skipped_fragments() +
-                client->num_queued_fragments());
+                                               client->num_queued_fragments());
       EXPECT_EQ(client->num_done_bitstream_buffers(),
                 client->num_queued_fragments());
     }
     LOG(INFO) << "Decoder " << i << " fps: " << client->frames_per_second();
     if (!render_as_thumbnails) {
-      int min_fps = suppress_rendering ?
-          video_file->min_fps_no_render : video_file->min_fps_render;
+      int min_fps = suppress_rendering ? video_file->min_fps_no_render
+                                       : video_file->min_fps_render;
       if (min_fps > 0 && !test_reuse_delay)
         EXPECT_GT(client->frames_per_second(), min_fps);
     }
@@ -1444,22 +1429,18 @@
     if (match == golden_md5s.end()) {
       // Convert raw RGB into PNG for export.
       std::vector<unsigned char> png;
-      gfx::PNGCodec::Encode(&rgb[0],
-                            gfx::PNGCodec::FORMAT_RGB,
+      gfx::PNGCodec::Encode(&rgb[0], gfx::PNGCodec::FORMAT_RGB,
                             kThumbnailsPageSize,
-                            kThumbnailsPageSize.width() * 3,
-                            true,
-                            std::vector<gfx::PNGCodec::Comment>(),
-                            &png);
+                            kThumbnailsPageSize.width() * 3, true,
+                            std::vector<gfx::PNGCodec::Comment>(), &png);
 
       LOG(ERROR) << "Unknown thumbnails MD5: " << md5_string;
 
       base::FilePath filepath(test_video_files_[0]->file_name);
       filepath = filepath.AddExtension(FILE_PATH_LITERAL(".bad_thumbnails"));
       filepath = filepath.AddExtension(FILE_PATH_LITERAL(".png"));
-      int num_bytes = base::WriteFile(filepath,
-                                           reinterpret_cast<char*>(&png[0]),
-                                           png.size());
+      int num_bytes = base::WriteFile(
+          filepath, reinterpret_cast<char*>(&png[0]), png.size());
       ASSERT_EQ(num_bytes, static_cast<int>(png.size()));
     }
     ASSERT_NE(match, golden_md5s.end());
@@ -1492,39 +1473,49 @@
 
 // Test that replay after EOS works fine.
 INSTANTIATE_TEST_CASE_P(
-    ReplayAfterEOS, VideoDecodeAcceleratorParamTest,
+    ReplayAfterEOS,
+    VideoDecodeAcceleratorParamTest,
     ::testing::Values(
         MakeTuple(1, 1, 4, END_OF_STREAM_RESET, CS_RESET, false, false)));
 
 // Test that Reset() before the first Decode() works fine.
 INSTANTIATE_TEST_CASE_P(
-    ResetBeforeDecode, VideoDecodeAcceleratorParamTest,
+    ResetBeforeDecode,
+    VideoDecodeAcceleratorParamTest,
     ::testing::Values(
         MakeTuple(1, 1, 1, START_OF_STREAM_RESET, CS_RESET, false, false)));
 
 // Test Reset() immediately after Decode() containing config info.
 INSTANTIATE_TEST_CASE_P(
-    ResetAfterFirstConfigInfo, VideoDecodeAcceleratorParamTest,
-    ::testing::Values(
-        MakeTuple(
-            1, 1, 1, RESET_AFTER_FIRST_CONFIG_INFO, CS_RESET, false, false)));
+    ResetAfterFirstConfigInfo,
+    VideoDecodeAcceleratorParamTest,
+    ::testing::Values(MakeTuple(1,
+                                1,
+                                1,
+                                RESET_AFTER_FIRST_CONFIG_INFO,
+                                CS_RESET,
+                                false,
+                                false)));
 
 // Test that Reset() mid-stream works fine and doesn't affect decoding even when
 // Decode() calls are made during the reset.
 INSTANTIATE_TEST_CASE_P(
-    MidStreamReset, VideoDecodeAcceleratorParamTest,
+    MidStreamReset,
+    VideoDecodeAcceleratorParamTest,
     ::testing::Values(
         MakeTuple(1, 1, 1, MID_STREAM_RESET, CS_RESET, false, false)));
 
 INSTANTIATE_TEST_CASE_P(
-    SlowRendering, VideoDecodeAcceleratorParamTest,
+    SlowRendering,
+    VideoDecodeAcceleratorParamTest,
     ::testing::Values(
         MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_RESET, true, false)));
 
 // Test that Destroy() mid-stream works fine (primarily this is testing that no
 // crashes occur).
 INSTANTIATE_TEST_CASE_P(
-    TearDownTiming, VideoDecodeAcceleratorParamTest,
+    TearDownTiming,
+    VideoDecodeAcceleratorParamTest,
     ::testing::Values(
         MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_DECODER_SET, false, false),
         MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_INITIALIZED, false, false),
@@ -1532,16 +1523,32 @@
         MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_FLUSHED, false, false),
         MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_RESETTING, false, false),
         MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_RESET, false, false),
-        MakeTuple(1, 1, 1, END_OF_STREAM_RESET,
-                  static_cast<ClientState>(-1), false, false),
-        MakeTuple(1, 1, 1, END_OF_STREAM_RESET,
-                  static_cast<ClientState>(-10), false, false),
-        MakeTuple(1, 1, 1, END_OF_STREAM_RESET,
-                  static_cast<ClientState>(-100), false, false)));
+        MakeTuple(1,
+                  1,
+                  1,
+                  END_OF_STREAM_RESET,
+                  static_cast<ClientState>(-1),
+                  false,
+                  false),
+        MakeTuple(1,
+                  1,
+                  1,
+                  END_OF_STREAM_RESET,
+                  static_cast<ClientState>(-10),
+                  false,
+                  false),
+        MakeTuple(1,
+                  1,
+                  1,
+                  END_OF_STREAM_RESET,
+                  static_cast<ClientState>(-100),
+                  false,
+                  false)));
 
 // Test that decoding various variation works with multiple in-flight decodes.
 INSTANTIATE_TEST_CASE_P(
-    DecodeVariations, VideoDecodeAcceleratorParamTest,
+    DecodeVariations,
+    VideoDecodeAcceleratorParamTest,
     ::testing::Values(
         MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_RESET, false, false),
         MakeTuple(1, 10, 1, END_OF_STREAM_RESET, CS_RESET, false, false),
@@ -1550,18 +1557,29 @@
 
 // Find out how many concurrent decoders can go before we exhaust system
 // resources.
-INSTANTIATE_TEST_CASE_P(
-    ResourceExhaustion, VideoDecodeAcceleratorParamTest,
-    ::testing::Values(
-        // +0 hack below to promote enum to int.
-        MakeTuple(kMinSupportedNumConcurrentDecoders + 0, 1, 1,
-                  END_OF_STREAM_RESET, CS_RESET, false, false),
-        MakeTuple(kMinSupportedNumConcurrentDecoders + 1, 1, 1,
-                  END_OF_STREAM_RESET, CS_RESET, false, false)));
+INSTANTIATE_TEST_CASE_P(ResourceExhaustion,
+                        VideoDecodeAcceleratorParamTest,
+                        ::testing::Values(
+                            // +0 hack below to promote enum to int.
+                            MakeTuple(kMinSupportedNumConcurrentDecoders + 0,
+                                      1,
+                                      1,
+                                      END_OF_STREAM_RESET,
+                                      CS_RESET,
+                                      false,
+                                      false),
+                            MakeTuple(kMinSupportedNumConcurrentDecoders + 1,
+                                      1,
+                                      1,
+                                      END_OF_STREAM_RESET,
+                                      CS_RESET,
+                                      false,
+                                      false)));
 
 // Thumbnailing test
 INSTANTIATE_TEST_CASE_P(
-    Thumbnail, VideoDecodeAcceleratorParamTest,
+    Thumbnail,
+    VideoDecodeAcceleratorParamTest,
     ::testing::Values(
         MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_RESET, false, true)));
 
@@ -1577,23 +1595,13 @@
 
   ClientStateNotification<ClientState>* note =
       new ClientStateNotification<ClientState>();
-  GLRenderingVDAClient* client =
-      new GLRenderingVDAClient(0,
-                               &rendering_helper_,
-                               note,
-                               test_video_files_[0]->data_str,
-                               1,
-                               1,
-                               test_video_files_[0]->reset_after_frame_num,
-                               CS_RESET,
-                               test_video_files_[0]->width,
-                               test_video_files_[0]->height,
-                               test_video_files_[0]->profile,
-                               g_fake_decoder,
-                               true,
-                               std::numeric_limits<int>::max(),
-                               kWebRtcDecodeCallsPerSecond,
-                               false /* render_as_thumbnail */);
+  GLRenderingVDAClient* client = new GLRenderingVDAClient(
+      0, &rendering_helper_, note, test_video_files_[0]->data_str, 1, 1,
+      test_video_files_[0]->reset_after_frame_num, CS_RESET,
+      test_video_files_[0]->width, test_video_files_[0]->height,
+      test_video_files_[0]->profile, g_fake_decoder, true,
+      std::numeric_limits<int>::max(), kWebRtcDecodeCallsPerSecond,
+      false /* render_as_thumbnail */);
   helper_params.window_sizes.push_back(
       gfx::Size(test_video_files_[0]->width, test_video_files_[0]->height));
   InitializeRenderingHelper(helper_params);
@@ -1621,9 +1629,9 @@
 // - Test frame size changes mid-stream
 
 }  // namespace
-}  // namespace content
+}  // namespace media
 
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);  // Removes gtest-specific args.
   base::CommandLine::Init(argc, argv);
 
@@ -1639,39 +1647,39 @@
   for (base::CommandLine::SwitchMap::const_iterator it = switches.begin();
        it != switches.end(); ++it) {
     if (it->first == "test_video_data") {
-      content::g_test_video_data = it->second.c_str();
+      media::g_test_video_data = it->second.c_str();
       continue;
     }
     // The output log for VDA performance test.
     if (it->first == "output_log") {
-      content::g_output_log = it->second.c_str();
+      media::g_output_log = it->second.c_str();
       continue;
     }
     if (it->first == "rendering_fps") {
       // On Windows, CommandLine::StringType is wstring. We need to convert
       // it to std::string first
       std::string input(it->second.begin(), it->second.end());
-      LOG_ASSERT(base::StringToDouble(input, &content::g_rendering_fps));
+      LOG_ASSERT(base::StringToDouble(input, &media::g_rendering_fps));
       continue;
     }
     if (it->first == "rendering_warm_up") {
       std::string input(it->second.begin(), it->second.end());
-      LOG_ASSERT(base::StringToInt(input, &content::g_rendering_warm_up));
+      LOG_ASSERT(base::StringToInt(input, &media::g_rendering_warm_up));
       continue;
     }
     // TODO(owenlin): Remove this flag once it is not used in autotest.
     if (it->first == "disable_rendering") {
-      content::g_rendering_fps = 0;
+      media::g_rendering_fps = 0;
       continue;
     }
 
     if (it->first == "num_play_throughs") {
       std::string input(it->second.begin(), it->second.end());
-      LOG_ASSERT(base::StringToInt(input, &content::g_num_play_throughs));
+      LOG_ASSERT(base::StringToInt(input, &media::g_num_play_throughs));
       continue;
     }
     if (it->first == "fake_decoder") {
-      content::g_fake_decoder = 1;
+      media::g_fake_decoder = 1;
       continue;
     }
     if (it->first == "v" || it->first == "vmodule")
@@ -1679,7 +1687,7 @@
     if (it->first == "ozone-platform" || it->first == "ozone-use-surfaceless")
       continue;
     if (it->first == "test_import") {
-      content::g_test_import = true;
+      media::g_test_import = true;
       continue;
     }
     LOG(FATAL) << "Unexpected switch: " << it->first << ":" << it->second;
@@ -1701,13 +1709,13 @@
 #endif
 
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
-  content::VaapiWrapper::PreSandboxInitialization();
+  media::VaapiWrapper::PreSandboxInitialization();
 #endif
 
-  content::g_env =
-      reinterpret_cast<content::VideoDecodeAcceleratorTestEnvironment*>(
+  media::g_env =
+      reinterpret_cast<media::VideoDecodeAcceleratorTestEnvironment*>(
           testing::AddGlobalTestEnvironment(
-              new content::VideoDecodeAcceleratorTestEnvironment()));
+              new media::VideoDecodeAcceleratorTestEnvironment()));
 
   return RUN_ALL_TESTS();
 }
diff --git a/content/common/gpu/media/video_encode_accelerator_unittest.cc b/media/gpu/video_encode_accelerator_unittest.cc
similarity index 95%
rename from content/common/gpu/media/video_encode_accelerator_unittest.cc
rename to media/gpu/video_encode_accelerator_unittest.cc
index 3f3f9ef..0cfbfaa 100644
--- a/content/common/gpu/media/video_encode_accelerator_unittest.cc
+++ b/media/gpu/video_encode_accelerator_unittest.cc
@@ -30,7 +30,6 @@
 #include "base/time/time.h"
 #include "base/timer/timer.h"
 #include "build/build_config.h"
-#include "content/common/gpu/media/video_accelerator_unittest_helpers.h"
 #include "media/base/bind_to_current_loop.h"
 #include "media/base/bitstream_buffer.h"
 #include "media/base/cdm_context.h"
@@ -43,29 +42,30 @@
 #include "media/filters/ffmpeg_video_decoder.h"
 #include "media/filters/h264_parser.h"
 #include "media/filters/ivf_parser.h"
+#include "media/gpu/video_accelerator_unittest_helpers.h"
 #include "media/video/fake_video_encode_accelerator.h"
 #include "media/video/video_encode_accelerator.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 #if defined(OS_CHROMEOS)
 #if defined(ARCH_CPU_ARMEL) || (defined(USE_OZONE) && defined(USE_V4L2_CODEC))
-#include "content/common/gpu/media/v4l2_video_encode_accelerator.h"
+#include "media/gpu/v4l2_video_encode_accelerator.h"
 #endif
 #if defined(ARCH_CPU_X86_FAMILY)
-#include "content/common/gpu/media/vaapi_video_encode_accelerator.h"
-#include "content/common/gpu/media/vaapi_wrapper.h"
+#include "media/gpu/vaapi_video_encode_accelerator.h"
+#include "media/gpu/vaapi_wrapper.h"
 // Status has been defined as int in Xlib.h.
 #undef Status
 #endif  // defined(ARCH_CPU_X86_FAMILY)
 #elif defined(OS_MACOSX)
-#include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
+#include "media/gpu/vt_video_encode_accelerator_mac.h"
 #else
 #error The VideoEncodeAcceleratorUnittest is not supported on this platform.
 #endif
 
 using media::VideoEncodeAccelerator;
 
-namespace content {
+namespace media {
 namespace {
 
 const media::VideoPixelFormat kInputFormat = media::PIXEL_FORMAT_I420;
@@ -329,7 +329,8 @@
       << "File should be mapped at a 64 byte boundary";
 
   LOG_ASSERT(test_stream->mapped_aligned_in_file.length() %
-               test_stream->aligned_buffer_size == 0U)
+                 test_stream->aligned_buffer_size ==
+             0U)
       << "Stream byte size is not a product of calculated frame byte size";
   LOG_ASSERT(test_stream->num_frames > 0UL);
 }
@@ -339,9 +340,9 @@
 static void ParseAndReadTestStreamData(const base::FilePath::StringType& data,
                                        ScopedVector<TestStream>* test_streams) {
   // Split the string to individual test stream data.
-  std::vector<base::FilePath::StringType> test_streams_data = base::SplitString(
-      data, base::FilePath::StringType(1, ';'),
-      base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  std::vector<base::FilePath::StringType> test_streams_data =
+      base::SplitString(data, base::FilePath::StringType(1, ';'),
+                        base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   LOG_ASSERT(test_streams_data.size() >= 1U) << data;
 
   // Parse each test stream data and read the input file.
@@ -373,21 +374,21 @@
       test_stream->out_filename = fields[4];
 
     if (fields.size() >= 6 && !fields[5].empty())
-      LOG_ASSERT(base::StringToUint(fields[5],
-          &test_stream->requested_bitrate));
+      LOG_ASSERT(
+          base::StringToUint(fields[5], &test_stream->requested_bitrate));
 
     if (fields.size() >= 7 && !fields[6].empty())
-      LOG_ASSERT(base::StringToUint(fields[6],
-          &test_stream->requested_framerate));
+      LOG_ASSERT(
+          base::StringToUint(fields[6], &test_stream->requested_framerate));
 
     if (fields.size() >= 8 && !fields[7].empty()) {
-      LOG_ASSERT(base::StringToUint(fields[7],
-                               &test_stream->requested_subsequent_bitrate));
+      LOG_ASSERT(base::StringToUint(
+          fields[7], &test_stream->requested_subsequent_bitrate));
     }
 
     if (fields.size() >= 9 && !fields[8].empty()) {
-      LOG_ASSERT(base::StringToUint(fields[8],
-                               &test_stream->requested_subsequent_framerate));
+      LOG_ASSERT(base::StringToUint(
+          fields[8], &test_stream->requested_subsequent_framerate));
     }
     test_streams->push_back(test_stream);
   }
@@ -546,7 +547,7 @@
         ASSERT_TRUE(seen_pps_);
         seen_idr_ = true;
         keyframe = true;
-        // fallthrough
+      // fallthrough
       case media::H264NALU::kNonIDRSlice: {
         ASSERT_TRUE(seen_idr_);
         if (!frame_cb_.Run(keyframe))
@@ -578,8 +579,7 @@
 class VP8Validator : public StreamValidator {
  public:
   explicit VP8Validator(const FrameFoundCallback& frame_cb)
-      : StreamValidator(frame_cb),
-        seen_keyframe_(false) {}
+      : StreamValidator(frame_cb), seen_keyframe_(false) {}
 
   void ProcessStreamBuffer(const uint8_t* stream, size_t size) override;
 
@@ -1051,7 +1051,9 @@
   thread_checker_.DetachFromThread();
 }
 
-VEAClient::~VEAClient() { LOG_ASSERT(!has_encoder()); }
+VEAClient::~VEAClient() {
+  LOG_ASSERT(!has_encoder());
+}
 
 std::unique_ptr<media::VideoEncodeAccelerator> VEAClient::CreateFakeVEA() {
   std::unique_ptr<media::VideoEncodeAccelerator> encoder;
@@ -1066,7 +1068,7 @@
 std::unique_ptr<media::VideoEncodeAccelerator> VEAClient::CreateV4L2VEA() {
   std::unique_ptr<media::VideoEncodeAccelerator> encoder;
 #if defined(OS_CHROMEOS) && (defined(ARCH_CPU_ARMEL) || \
-    (defined(USE_OZONE) && defined(USE_V4L2_CODEC)))
+                             (defined(USE_OZONE) && defined(USE_V4L2_CODEC)))
   scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kEncoder);
   if (device)
     encoder.reset(new V4L2VideoEncodeAccelerator(device));
@@ -1105,11 +1107,9 @@
       continue;
     encoder_ = std::move(encoders[i]);
     SetState(CS_ENCODER_SET);
-    if (encoder_->Initialize(kInputFormat,
-                             test_stream_->visible_size,
+    if (encoder_->Initialize(kInputFormat, test_stream_->visible_size,
                              test_stream_->requested_profile,
-                             requested_bitrate_,
-                             this)) {
+                             requested_bitrate_, this)) {
       SetStreamParameters(requested_bitrate_, requested_framerate_);
       SetState(CS_INITIALIZED);
 
@@ -1359,11 +1359,9 @@
 
   scoped_refptr<media::VideoFrame> frame = CreateFrame(position);
   EXPECT_TRUE(frame);
-  frame->AddDestructionObserver(
-          media::BindToCurrentLoop(
-              base::Bind(&VEAClient::InputNoLongerNeededCallback,
-                         base::Unretained(this),
-                         next_input_id_)));
+  frame->AddDestructionObserver(media::BindToCurrentLoop(
+      base::Bind(&VEAClient::InputNoLongerNeededCallback,
+                 base::Unretained(this), next_input_id_)));
 
   LOG_ASSERT(inputs_at_client_.insert(next_input_id_).second);
 
@@ -1431,10 +1429,11 @@
   base::SharedMemoryHandle dup_handle;
   LOG_ASSERT(shm->ShareToProcess(base::GetCurrentProcessHandle(), &dup_handle));
 
-  media::BitstreamBuffer bitstream_buffer(
-      next_output_buffer_id_++, dup_handle, output_buffer_size_);
-  LOG_ASSERT(output_buffers_at_client_.insert(
-      std::make_pair(bitstream_buffer.id(), shm)).second);
+  media::BitstreamBuffer bitstream_buffer(next_output_buffer_id_++, dup_handle,
+                                          output_buffer_size_);
+  LOG_ASSERT(output_buffers_at_client_
+                 .insert(std::make_pair(bitstream_buffer.id(), shm))
+                 .second);
   encoder_->UseOutputBitstreamBuffer(bitstream_buffer);
 }
 
@@ -1528,23 +1527,22 @@
   unsigned int bitrate = encoded_stream_size_since_last_check_ * 8 *
                          current_framerate_ / num_frames_since_last_check_;
   DVLOG(1) << "Current chunk's bitrate: " << bitrate
-           << " (expected: " << current_requested_bitrate_
-           << " @ " << current_framerate_ << " FPS,"
+           << " (expected: " << current_requested_bitrate_ << " @ "
+           << current_framerate_ << " FPS,"
            << " num frames in chunk: " << num_frames_since_last_check_;
 
   num_frames_since_last_check_ = 0;
   encoded_stream_size_since_last_check_ = 0;
 
   if (force_bitrate_) {
-    EXPECT_NEAR(bitrate,
-                current_requested_bitrate_,
+    EXPECT_NEAR(bitrate, current_requested_bitrate_,
                 kBitrateTolerance * current_requested_bitrate_);
   }
 
   // All requested keyframes should've been provided. Allow the last requested
   // frame to remain undelivered if we haven't reached the maximum frame number
   // by which it should have arrived.
-  if (num_encoded_frames_ < next_keyframe_at_  + kMaxKeyframeDelay)
+  if (num_encoded_frames_ < next_keyframe_at_ + kMaxKeyframeDelay)
     EXPECT_LE(num_keyframes_requested_, 1UL);
   else
     EXPECT_EQ(num_keyframes_requested_, 0UL);
@@ -1610,7 +1608,7 @@
   const bool verify_output =
       base::get<7>(GetParam()) || g_env->verify_all_output();
 
-  ScopedVector<ClientStateNotification<ClientState> > notes;
+  ScopedVector<ClientStateNotification<ClientState>> notes;
   ScopedVector<VEAClient> clients;
   base::Thread encoder_thread("EncoderThread");
   ASSERT_TRUE(encoder_thread.Start());
@@ -1633,9 +1631,8 @@
         mid_stream_bitrate_switch, mid_stream_framerate_switch, verify_output));
 
     encoder_thread.message_loop()->PostTask(
-        FROM_HERE,
-        base::Bind(&VEAClient::CreateEncoder,
-                   base::Unretained(clients.back())));
+        FROM_HERE, base::Bind(&VEAClient::CreateEncoder,
+                              base::Unretained(clients.back())));
   }
 
   // All encoders must pass through states in this order.
@@ -1738,7 +1735,7 @@
 // - mid-stream encoder_->Destroy()
 
 }  // namespace
-}  // namespace content
+}  // namespace media
 
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);  // Removes gtest-specific args.
@@ -1749,8 +1746,8 @@
 
   std::unique_ptr<base::FilePath::StringType> test_stream_data(
       new base::FilePath::StringType(
-          media::GetTestDataFilePath(content::g_default_in_filename).value() +
-          content::g_default_in_parameters));
+          media::GetTestDataFilePath(media::g_default_in_filename).value() +
+          media::g_default_in_parameters));
 
   // Needed to enable DVLOG through --vmodule.
   logging::LoggingSettings settings;
@@ -1767,8 +1764,7 @@
 
   base::CommandLine::SwitchMap switches = cmd_line->GetSwitches();
   for (base::CommandLine::SwitchMap::const_iterator it = switches.begin();
-       it != switches.end();
-       ++it) {
+       it != switches.end(); ++it) {
     if (it->first == "test_stream_data") {
       test_stream_data->assign(it->second.c_str());
       continue;
@@ -1781,7 +1777,7 @@
     }
     if (it->first == "num_frames_to_encode") {
       std::string input(it->second.begin(), it->second.end());
-      LOG_ASSERT(base::StringToInt(input, &content::g_num_frames_to_encode));
+      LOG_ASSERT(base::StringToInt(input, &media::g_num_frames_to_encode));
       continue;
     }
     if (it->first == "measure_latency") {
@@ -1789,7 +1785,7 @@
       continue;
     }
     if (it->first == "fake_encoder") {
-      content::g_fake_encoder = true;
+      media::g_fake_encoder = true;
       continue;
     }
     if (it->first == "run_at_fps") {
@@ -1815,13 +1811,13 @@
   }
 
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
-  content::VaapiWrapper::PreSandboxInitialization();
+  media::VaapiWrapper::PreSandboxInitialization();
 #endif
 
-  content::g_env =
-      reinterpret_cast<content::VideoEncodeAcceleratorTestEnvironment*>(
+  media::g_env =
+      reinterpret_cast<media::VideoEncodeAcceleratorTestEnvironment*>(
           testing::AddGlobalTestEnvironment(
-              new content::VideoEncodeAcceleratorTestEnvironment(
+              new media::VideoEncodeAcceleratorTestEnvironment(
                   std::move(test_stream_data), log_path, run_at_fps,
                   needs_encode_latency, verify_all_output)));
 
diff --git a/content/common/gpu/media/vp8_decoder.cc b/media/gpu/vp8_decoder.cc
similarity index 95%
rename from content/common/gpu/media/vp8_decoder.cc
rename to media/gpu/vp8_decoder.cc
index eed6b59a..9f315c4 100644
--- a/content/common/gpu/media/vp8_decoder.cc
+++ b/media/gpu/vp8_decoder.cc
@@ -2,16 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vp8_decoder.h"
+#include "media/gpu/vp8_decoder.h"
 #include "media/base/limits.h"
 
-namespace content {
+namespace media {
 
-VP8Decoder::VP8Accelerator::VP8Accelerator() {
-}
+VP8Decoder::VP8Accelerator::VP8Accelerator() {}
 
-VP8Decoder::VP8Accelerator::~VP8Accelerator() {
-}
+VP8Decoder::VP8Accelerator::~VP8Accelerator() {}
 
 VP8Decoder::VP8Decoder(VP8Accelerator* accelerator)
     : state_(kNeedStreamMetadata),
@@ -21,8 +19,7 @@
   DCHECK(accelerator_);
 }
 
-VP8Decoder::~VP8Decoder() {
-}
+VP8Decoder::~VP8Decoder() {}
 
 bool VP8Decoder::Flush() {
   DVLOG(2) << "Decoder flush";
@@ -194,4 +191,4 @@
   return kVP8NumFramesActive + kPicsInPipeline;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vp8_decoder.h b/media/gpu/vp8_decoder.h
similarity index 88%
rename from content/common/gpu/media/vp8_decoder.h
rename to media/gpu/vp8_decoder.h
index 50b3d1c..32ca48b 100644
--- a/content/common/gpu/media/vp8_decoder.h
+++ b/media/gpu/vp8_decoder.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VP8_DECODER_H_
-#define CONTENT_COMMON_GPU_MEDIA_VP8_DECODER_H_
+#ifndef MEDIA_GPU_VP8_DECODER_H_
+#define MEDIA_GPU_VP8_DECODER_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -12,11 +12,11 @@
 
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "content/common/gpu/media/accelerated_video_decoder.h"
-#include "content/common/gpu/media/vp8_picture.h"
 #include "media/filters/vp8_parser.h"
+#include "media/gpu/accelerated_video_decoder.h"
+#include "media/gpu/vp8_picture.h"
 
-namespace content {
+namespace media {
 
 // Clients of this class are expected to pass raw VP8 stream and are expected
 // to provide an implementation of VP8Accelerator for offloading final steps
@@ -24,9 +24,9 @@
 //
 // This class must be created, called and destroyed on a single thread, and
 // does nothing internally on any other thread.
-class CONTENT_EXPORT VP8Decoder : public AcceleratedVideoDecoder {
+class MEDIA_GPU_EXPORT VP8Decoder : public AcceleratedVideoDecoder {
  public:
-  class CONTENT_EXPORT VP8Accelerator {
+  class MEDIA_GPU_EXPORT VP8Accelerator {
    public:
     VP8Accelerator();
     virtual ~VP8Accelerator();
@@ -66,7 +66,7 @@
   VP8Decoder(VP8Accelerator* accelerator);
   ~VP8Decoder() override;
 
-  // content::AcceleratedVideoDecoder implementation.
+  // media::AcceleratedVideoDecoder implementation.
   bool Flush() override WARN_UNUSED_RESULT;
   void Reset() override;
   void SetStream(const uint8_t* ptr, size_t size) override;
@@ -107,6 +107,6 @@
   DISALLOW_COPY_AND_ASSIGN(VP8Decoder);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VP8_DECODER_H_
+#endif  // MEDIA_GPU_VP8_DECODER_H_
diff --git a/content/common/gpu/media/vp8_picture.cc b/media/gpu/vp8_picture.cc
similarity index 66%
rename from content/common/gpu/media/vp8_picture.cc
rename to media/gpu/vp8_picture.cc
index 63ec8e9..73574941 100644
--- a/content/common/gpu/media/vp8_picture.cc
+++ b/media/gpu/vp8_picture.cc
@@ -2,15 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vp8_picture.h"
+#include "media/gpu/vp8_picture.h"
 
-namespace content {
+namespace media {
 
-VP8Picture::VP8Picture() {
-}
+VP8Picture::VP8Picture() {}
 
-VP8Picture::~VP8Picture() {
-}
+VP8Picture::~VP8Picture() {}
 
 V4L2VP8Picture* VP8Picture::AsV4L2VP8Picture() {
   return nullptr;
@@ -20,4 +18,4 @@
   return nullptr;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vp8_picture.h b/media/gpu/vp8_picture.h
similarity index 75%
rename from content/common/gpu/media/vp8_picture.h
rename to media/gpu/vp8_picture.h
index 602357f..164cf0a 100644
--- a/content/common/gpu/media/vp8_picture.h
+++ b/media/gpu/vp8_picture.h
@@ -2,13 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VP8_PICTURE_H_
-#define CONTENT_COMMON_GPU_MEDIA_VP8_PICTURE_H_
+#ifndef MEDIA_GPU_VP8_PICTURE_H_
+#define MEDIA_GPU_VP8_PICTURE_H_
 
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 
-namespace content {
+namespace media {
 
 class V4L2VP8Picture;
 class VaapiVP8Picture;
@@ -27,6 +27,6 @@
   DISALLOW_COPY_AND_ASSIGN(VP8Picture);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VP8_PICTURE_H_
+#endif  // MEDIA_GPU_VP8_PICTURE_H_
diff --git a/content/common/gpu/media/vp9_decoder.cc b/media/gpu/vp9_decoder.cc
similarity index 97%
rename from content/common/gpu/media/vp9_decoder.cc
rename to media/gpu/vp9_decoder.cc
index cba92fd..58b42ca 100644
--- a/content/common/gpu/media/vp9_decoder.cc
+++ b/media/gpu/vp9_decoder.cc
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vp9_decoder.h"
+#include "media/gpu/vp9_decoder.h"
 
 #include <memory>
 
 #include "base/logging.h"
 #include "media/base/limits.h"
 
-namespace content {
+namespace media {
 
 VP9Decoder::VP9Accelerator::VP9Accelerator() {}
 
@@ -179,4 +180,4 @@
   return media::limits::kMaxVideoFrames + media::kVp9NumRefFrames + 2;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vp9_decoder.h b/media/gpu/vp9_decoder.h
similarity index 90%
rename from content/common/gpu/media/vp9_decoder.h
rename to media/gpu/vp9_decoder.h
index c513a94..20e869e 100644
--- a/content/common/gpu/media/vp9_decoder.h
+++ b/media/gpu/vp9_decoder.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VP9_DECODER_H_
-#define CONTENT_COMMON_GPU_MEDIA_VP9_DECODER_H_
+#ifndef MEDIA_GPU_VP9_DECODER_H_
+#define MEDIA_GPU_VP9_DECODER_H_
 
 #include <stddef.h>
 #include <stdint.h>
@@ -13,11 +13,11 @@
 
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "content/common/gpu/media/accelerated_video_decoder.h"
-#include "content/common/gpu/media/vp9_picture.h"
 #include "media/filters/vp9_parser.h"
+#include "media/gpu/accelerated_video_decoder.h"
+#include "media/gpu/vp9_picture.h"
 
-namespace content {
+namespace media {
 
 // This class implements an AcceleratedVideoDecoder for VP9 decoding.
 // Clients of this class are expected to pass raw VP9 stream and are expected
@@ -26,9 +26,9 @@
 //
 // This class must be created, called and destroyed on a single thread, and
 // does nothing internally on any other thread.
-class CONTENT_EXPORT VP9Decoder : public AcceleratedVideoDecoder {
+class MEDIA_GPU_EXPORT VP9Decoder : public AcceleratedVideoDecoder {
  public:
-  class CONTENT_EXPORT VP9Accelerator {
+  class MEDIA_GPU_EXPORT VP9Accelerator {
    public:
     VP9Accelerator();
     virtual ~VP9Accelerator();
@@ -82,7 +82,7 @@
   VP9Decoder(VP9Accelerator* accelerator);
   ~VP9Decoder() override;
 
-  // content::AcceleratedVideoDecoder implementation.
+  // media::AcceleratedVideoDecoder implementation.
   void SetStream(const uint8_t* ptr, size_t size) override;
   bool Flush() override WARN_UNUSED_RESULT;
   void Reset() override;
@@ -129,6 +129,6 @@
   DISALLOW_COPY_AND_ASSIGN(VP9Decoder);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VP9_DECODER_H_
+#endif  // MEDIA_GPU_VP9_DECODER_H_
diff --git a/content/common/gpu/media/vp9_picture.cc b/media/gpu/vp9_picture.cc
similarity index 79%
rename from content/common/gpu/media/vp9_picture.cc
rename to media/gpu/vp9_picture.cc
index 4b20316c..2c6bdf1 100644
--- a/content/common/gpu/media/vp9_picture.cc
+++ b/media/gpu/vp9_picture.cc
@@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vp9_picture.h"
+#include "media/gpu/vp9_picture.h"
 
-namespace content {
+namespace media {
 
 VP9Picture::VP9Picture() {}
 
@@ -18,4 +18,4 @@
   return nullptr;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vp9_picture.h b/media/gpu/vp9_picture.h
similarity index 78%
rename from content/common/gpu/media/vp9_picture.h
rename to media/gpu/vp9_picture.h
index 5d63ade..a94ff0ea 100644
--- a/content/common/gpu/media/vp9_picture.h
+++ b/media/gpu/vp9_picture.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VP9_PICTURE_H_
-#define CONTENT_COMMON_GPU_MEDIA_VP9_PICTURE_H_
+#ifndef MEDIA_GPU_VP9_PICTURE_H_
+#define MEDIA_GPU_VP9_PICTURE_H_
 
 #include <memory>
 
@@ -11,7 +11,7 @@
 #include "base/memory/ref_counted.h"
 #include "media/filters/vp9_parser.h"
 
-namespace content {
+namespace media {
 
 class V4L2VP9Picture;
 class VaapiVP9Picture;
@@ -32,6 +32,6 @@
   DISALLOW_COPY_AND_ASSIGN(VP9Picture);
 };
 
-}  // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VP9_PICTURE_H_
+#endif  // MEDIA_GPU_VP9_PICTURE_H_
diff --git a/content/common/gpu/media/vt.sig b/media/gpu/vt.sig
similarity index 100%
rename from content/common/gpu/media/vt.sig
rename to media/gpu/vt.sig
diff --git a/media/gpu/vt_mac.h b/media/gpu/vt_mac.h
new file mode 100644
index 0000000..fbaa885
--- /dev/null
+++ b/media/gpu/vt_mac.h
@@ -0,0 +1,19 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_VT_MAC_H_
+#define MEDIA_GPU_VT_MAC_H_
+
+// Dynamic library loader.
+#include "media/gpu/vt_stubs.h"
+
+// CoreMedia and VideoToolbox types.
+#include "media/gpu/vt_stubs_header.fragment"
+
+// CoreMedia and VideoToolbox functions.
+extern "C" {
+#include "media/gpu/vt.sig"
+}  // extern "C"
+
+#endif  // MEDIA_GPU_VT_MAC_H_
diff --git a/content/common/gpu/media/vt_stubs_header.fragment b/media/gpu/vt_stubs_header.fragment
similarity index 100%
rename from content/common/gpu/media/vt_stubs_header.fragment
rename to media/gpu/vt_stubs_header.fragment
diff --git a/content/common/gpu/media/vt_video_decode_accelerator_mac.cc b/media/gpu/vt_video_decode_accelerator_mac.cc
similarity index 85%
rename from content/common/gpu/media/vt_video_decode_accelerator_mac.cc
rename to media/gpu/vt_video_decode_accelerator_mac.cc
index 1942577..2f989e30 100644
--- a/content/common/gpu/media/vt_video_decode_accelerator_mac.cc
+++ b/media/gpu/vt_video_decode_accelerator_mac.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
+#include "media/gpu/vt_video_decode_accelerator_mac.h"
 
 #include <CoreVideo/CoreVideo.h>
 #include <OpenGL/CGLIOSurface.h>
@@ -28,34 +28,30 @@
 #include "ui/gl/gl_implementation.h"
 #include "ui/gl/scoped_binders.h"
 
-using content_common_gpu_media::kModuleVt;
-using content_common_gpu_media::InitializeStubs;
-using content_common_gpu_media::IsVtInitialized;
-using content_common_gpu_media::StubPathMap;
+using media_gpu::kModuleVt;
+using media_gpu::InitializeStubs;
+using media_gpu::IsVtInitialized;
+using media_gpu::StubPathMap;
 
-#define NOTIFY_STATUS(name, status, session_failure)   \
-    do {                                               \
-      OSSTATUS_DLOG(ERROR, status) << name;            \
-      NotifyError(PLATFORM_FAILURE, session_failure);  \
-    } while (0)
+#define NOTIFY_STATUS(name, status, session_failure) \
+  do {                                               \
+    OSSTATUS_DLOG(ERROR, status) << name;            \
+    NotifyError(PLATFORM_FAILURE, session_failure);  \
+  } while (0)
 
-namespace content {
+namespace media {
 
 // Only H.264 with 4:2:0 chroma sampling is supported.
 static const media::VideoCodecProfile kSupportedProfiles[] = {
-  media::H264PROFILE_BASELINE,
-  media::H264PROFILE_MAIN,
-  media::H264PROFILE_EXTENDED,
-  media::H264PROFILE_HIGH,
-  // TODO(hubbe): Try to re-enable this again somehow. Currently it seems
-  // that some codecs fail to check the profile during initialization and
-  // then fail on the first frame decode, which currently results in a
-  // pipeline failure.
-  // media::H264PROFILE_HIGH10PROFILE,
-  media::H264PROFILE_SCALABLEBASELINE,
-  media::H264PROFILE_SCALABLEHIGH,
-  media::H264PROFILE_STEREOHIGH,
-  media::H264PROFILE_MULTIVIEWHIGH,
+    media::H264PROFILE_BASELINE, media::H264PROFILE_MAIN,
+    media::H264PROFILE_EXTENDED, media::H264PROFILE_HIGH,
+    // TODO(hubbe): Try to re-enable this again somehow. Currently it seems
+    // that some codecs fail to check the profile during initialization and
+    // then fail on the first frame decode, which currently results in a
+    // pipeline failure.
+    // media::H264PROFILE_HIGH10PROFILE,
+    media::H264PROFILE_SCALABLEBASELINE, media::H264PROFILE_SCALABLEHIGH,
+    media::H264PROFILE_STEREOHIGH, media::H264PROFILE_MULTIVIEWHIGH,
 };
 
 // Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
@@ -73,8 +69,8 @@
 static const int kMaxReorderQueueSize = 16;
 
 // Build an |image_config| dictionary for VideoToolbox initialization.
-static base::ScopedCFTypeRef<CFMutableDictionaryRef>
-BuildImageConfig(CMVideoDimensions coded_dimensions) {
+static base::ScopedCFTypeRef<CFMutableDictionaryRef> BuildImageConfig(
+    CMVideoDimensions coded_dimensions) {
   base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config;
 
   // Note that 4:2:0 textures cannot be used directly as RGBA in OpenGL, but are
@@ -88,12 +84,10 @@
   if (!cf_pixel_format.get() || !cf_width.get() || !cf_height.get())
     return image_config;
 
-  image_config.reset(
-      CFDictionaryCreateMutable(
-          kCFAllocatorDefault,
-          3,  // capacity
-          &kCFTypeDictionaryKeyCallBacks,
-          &kCFTypeDictionaryValueCallBacks));
+  image_config.reset(CFDictionaryCreateMutable(
+      kCFAllocatorDefault,
+      3,  // capacity
+      &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks));
   if (!image_config.get())
     return image_config;
 
@@ -111,8 +105,10 @@
 // successful.
 //
 // TODO(sandersd): Merge with ConfigureDecoder(), as the code is very similar.
-static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
-                                      const uint8_t* pps, size_t pps_size,
+static bool CreateVideoToolboxSession(const uint8_t* sps,
+                                      size_t sps_size,
+                                      const uint8_t* pps,
+                                      size_t pps_size,
                                       bool require_hardware) {
   const uint8_t* data_ptrs[] = {sps, pps};
   const size_t data_sizes[] = {sps_size, pps_size};
@@ -120,10 +116,10 @@
   base::ScopedCFTypeRef<CMFormatDescriptionRef> format;
   OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
       kCFAllocatorDefault,
-      2,                    // parameter_set_count
-      data_ptrs,            // &parameter_set_pointers
-      data_sizes,           // &parameter_set_sizes
-      kNALUHeaderLength,    // nal_unit_header_length
+      2,                  // parameter_set_count
+      data_ptrs,          // &parameter_set_pointers
+      data_sizes,         // &parameter_set_sizes
+      kNALUHeaderLength,  // nal_unit_header_length
       format.InitializeInto());
   if (status) {
     OSSTATUS_DLOG(WARNING, status)
@@ -132,11 +128,10 @@
   }
 
   base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
-      CFDictionaryCreateMutable(
-          kCFAllocatorDefault,
-          1,  // capacity
-          &kCFTypeDictionaryKeyCallBacks,
-          &kCFTypeDictionaryValueCallBacks));
+      CFDictionaryCreateMutable(kCFAllocatorDefault,
+                                1,  // capacity
+                                &kCFTypeDictionaryKeyCallBacks,
+                                &kCFTypeDictionaryValueCallBacks));
   if (!decoder_config.get())
     return false;
 
@@ -144,8 +139,7 @@
     CFDictionarySetValue(
         decoder_config,
         // kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
-        CFSTR("RequireHardwareAcceleratedVideoDecoder"),
-        kCFBooleanTrue);
+        CFSTR("RequireHardwareAcceleratedVideoDecoder"), kCFBooleanTrue);
   }
 
   base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
@@ -158,14 +152,13 @@
   base::ScopedCFTypeRef<VTDecompressionSessionRef> session;
   status = VTDecompressionSessionCreate(
       kCFAllocatorDefault,
-      format,               // video_format_description
-      decoder_config,       // video_decoder_specification
-      image_config,         // destination_image_buffer_attributes
-      &callback,            // output_callback
+      format,          // video_format_description
+      decoder_config,  // video_decoder_specification
+      image_config,    // destination_image_buffer_attributes
+      &callback,       // output_callback
       session.InitializeInto());
   if (status) {
-    OSSTATUS_DLOG(WARNING, status)
-        << "Failed to create VTDecompressionSession";
+    OSSTATUS_DLOG(WARNING, status) << "Failed to create VTDecompressionSession";
     return false;
   }
 
@@ -235,36 +228,31 @@
 }
 
 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator.
-static void OutputThunk(
-    void* decompression_output_refcon,
-    void* source_frame_refcon,
-    OSStatus status,
-    VTDecodeInfoFlags info_flags,
-    CVImageBufferRef image_buffer,
-    CMTime presentation_time_stamp,
-    CMTime presentation_duration) {
+static void OutputThunk(void* decompression_output_refcon,
+                        void* source_frame_refcon,
+                        OSStatus status,
+                        VTDecodeInfoFlags info_flags,
+                        CVImageBufferRef image_buffer,
+                        CMTime presentation_time_stamp,
+                        CMTime presentation_duration) {
   VTVideoDecodeAccelerator* vda =
       reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon);
   vda->Output(source_frame_refcon, status, image_buffer);
 }
 
-VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
-}
+VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {}
 
 VTVideoDecodeAccelerator::Task::Task(const Task& other) = default;
 
-VTVideoDecodeAccelerator::Task::~Task() {
-}
+VTVideoDecodeAccelerator::Task::~Task() {}
 
 VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id)
     : bitstream_id(bitstream_id),
       pic_order_cnt(0),
       is_idr(false),
-      reorder_window(0) {
-}
+      reorder_window(0) {}
 
-VTVideoDecodeAccelerator::Frame::~Frame() {
-}
+VTVideoDecodeAccelerator::Frame::~Frame() {}
 
 VTVideoDecodeAccelerator::PictureInfo::PictureInfo(uint32_t client_texture_id,
                                                    uint32_t service_texture_id)
@@ -354,8 +342,7 @@
 
   // Count the session as successfully initialized.
   UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason",
-                            SFT_SUCCESSFULLY_INITIALIZED,
-                            SFT_MAX + 1);
+                            SFT_SUCCESSFULLY_INITIALIZED, SFT_MAX + 1);
   return true;
 }
 
@@ -364,8 +351,8 @@
   if (session_) {
     OSStatus status = VTDecompressionSessionWaitForAsynchronousFrames(session_);
     if (status) {
-      NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()",
-                    status, SFT_PLATFORM_ERROR);
+      NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()", status,
+                    SFT_PLATFORM_ERROR);
       return false;
     }
   }
@@ -395,10 +382,10 @@
   format_.reset();
   OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
       kCFAllocatorDefault,
-      nalu_data_ptrs.size(),      // parameter_set_count
-      &nalu_data_ptrs.front(),    // &parameter_set_pointers
-      &nalu_data_sizes.front(),   // &parameter_set_sizes
-      kNALUHeaderLength,          // nal_unit_header_length
+      nalu_data_ptrs.size(),     // parameter_set_count
+      &nalu_data_ptrs.front(),   // &parameter_set_pointers
+      &nalu_data_sizes.front(),  // &parameter_set_sizes
+      kNALUHeaderLength,         // nal_unit_header_length
       format_.InitializeInto());
   if (status) {
     NOTIFY_STATUS("CMVideoFormatDescriptionCreateFromH264ParameterSets()",
@@ -416,11 +403,10 @@
 
   // Prepare VideoToolbox configuration dictionaries.
   base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
-      CFDictionaryCreateMutable(
-          kCFAllocatorDefault,
-          1,  // capacity
-          &kCFTypeDictionaryKeyCallBacks,
-          &kCFTypeDictionaryValueCallBacks));
+      CFDictionaryCreateMutable(kCFAllocatorDefault,
+                                1,  // capacity
+                                &kCFTypeDictionaryKeyCallBacks,
+                                &kCFTypeDictionaryValueCallBacks));
   if (!decoder_config.get()) {
     DLOG(ERROR) << "Failed to create CFMutableDictionary";
     NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
@@ -430,8 +416,7 @@
   CFDictionarySetValue(
       decoder_config,
       // kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
-      CFSTR("EnableHardwareAcceleratedVideoDecoder"),
-      kCFBooleanTrue);
+      CFSTR("EnableHardwareAcceleratedVideoDecoder"), kCFBooleanTrue);
 
   base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
       BuildImageConfig(coded_dimensions));
@@ -449,10 +434,10 @@
   session_.reset();
   status = VTDecompressionSessionCreate(
       kCFAllocatorDefault,
-      format_,              // video_format_description
-      decoder_config,       // video_decoder_specification
-      image_config,         // destination_image_buffer_attributes
-      &callback_,           // output_callback
+      format_,         // video_format_description
+      decoder_config,  // video_decoder_specification
+      image_config,    // destination_image_buffer_attributes
+      &callback_,      // output_callback
       session_.InitializeInto());
   if (status) {
     NOTIFY_STATUS("VTDecompressionSessionCreate()", status,
@@ -466,8 +451,7 @@
   if (VTSessionCopyProperty(
           session_,
           // kVTDecompressionPropertyKey_UsingHardwareAcceleratedVideoDecoder
-          CFSTR("UsingHardwareAcceleratedVideoDecoder"),
-          kCFAllocatorDefault,
+          CFSTR("UsingHardwareAcceleratedVideoDecoder"), kCFAllocatorDefault,
           cf_using_hardware.InitializeInto()) == 0) {
     using_hardware = CFBooleanGetValue(cf_using_hardware);
   }
@@ -617,8 +601,8 @@
 
           if (sps->vui_parameters_present_flag &&
               sps->bitstream_restriction_flag) {
-            frame->reorder_window = std::min(sps->max_num_reorder_frames,
-                                             kMaxReorderQueueSize - 1);
+            frame->reorder_window =
+                std::min(sps->max_num_reorder_frames, kMaxReorderQueueSize - 1);
           }
         }
         has_slice = true;
@@ -677,8 +661,9 @@
     // Keep everything in order by flushing first.
     if (!FinishDelayedFrames())
       return;
-    gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
-        &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
+    gpu_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
     return;
   }
 
@@ -726,8 +711,8 @@
   for (size_t i = 0; i < nalus.size(); i++) {
     media::H264NALU& nalu = nalus[i];
     uint32_t header = base::HostToNet32(static_cast<uint32_t>(nalu.size));
-    status = CMBlockBufferReplaceDataBytes(
-        &header, data, offset, kNALUHeaderLength);
+    status =
+        CMBlockBufferReplaceDataBytes(&header, data, offset, kNALUHeaderLength);
     if (status) {
       NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status,
                     SFT_PLATFORM_ERROR);
@@ -745,19 +730,18 @@
 
   // Package the data in a CMSampleBuffer.
   base::ScopedCFTypeRef<CMSampleBufferRef> sample;
-  status = CMSampleBufferCreate(
-      kCFAllocatorDefault,
-      data,                 // data_buffer
-      true,                 // data_ready
-      nullptr,              // make_data_ready_callback
-      nullptr,              // make_data_ready_refcon
-      format_,              // format_description
-      1,                    // num_samples
-      0,                    // num_sample_timing_entries
-      nullptr,              // &sample_timing_array
-      1,                    // num_sample_size_entries
-      &data_size,           // &sample_size_array
-      sample.InitializeInto());
+  status = CMSampleBufferCreate(kCFAllocatorDefault,
+                                data,        // data_buffer
+                                true,        // data_ready
+                                nullptr,     // make_data_ready_callback
+                                nullptr,     // make_data_ready_refcon
+                                format_,     // format_description
+                                1,           // num_samples
+                                0,           // num_sample_timing_entries
+                                nullptr,     // &sample_timing_array
+                                1,           // num_sample_size_entries
+                                &data_size,  // &sample_size_array
+                                sample.InitializeInto());
   if (status) {
     NOTIFY_STATUS("CMSampleBufferCreate()", status, SFT_PLATFORM_ERROR);
     return;
@@ -772,10 +756,10 @@
       kVTDecodeFrame_EnableAsynchronousDecompression;
   status = VTDecompressionSessionDecodeFrame(
       session_,
-      sample,                                 // sample_buffer
-      decode_flags,                           // decode_flags
-      reinterpret_cast<void*>(frame),         // source_frame_refcon
-      nullptr);                               // &info_flags_out
+      sample,                          // sample_buffer
+      decode_flags,                    // decode_flags
+      reinterpret_cast<void*>(frame),  // source_frame_refcon
+      nullptr);                        // &info_flags_out
   if (status) {
     NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status,
                   SFT_DECODE_ERROR);
@@ -784,10 +768,9 @@
 }
 
 // This method may be called on any VideoToolbox thread.
-void VTVideoDecodeAccelerator::Output(
-    void* source_frame_refcon,
-    OSStatus status,
-    CVImageBufferRef image_buffer) {
+void VTVideoDecodeAccelerator::Output(void* source_frame_refcon,
+                                      OSStatus status,
+                                      CVImageBufferRef image_buffer) {
   if (status) {
     NOTIFY_STATUS("Decoding", status, SFT_DECODE_ERROR);
     return;
@@ -809,8 +792,9 @@
 
   Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon);
   frame->image.reset(image_buffer, base::scoped_policy::RETAIN);
-  gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
-      &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
+  gpu_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
 }
 
 void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) {
@@ -829,8 +813,9 @@
 
   // Always queue a task, even if FinishDelayedFrames() fails, so that
   // destruction always completes.
-  gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
-      &VTVideoDecodeAccelerator::FlushDone, weak_this_, type));
+  gpu_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&VTVideoDecodeAccelerator::FlushDone, weak_this_, type));
 }
 
 void VTVideoDecodeAccelerator::FlushDone(TaskType type) {
@@ -876,8 +861,9 @@
   // Pictures are not marked as uncleared until after this method returns, and
   // they will be broken if they are used before that happens. So, schedule
   // future work after that happens.
-  gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
-      &VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_));
+  gpu_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_));
 }
 
 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
@@ -982,9 +968,9 @@
   // If the next task is a flush (because there is a pending flush or becuase
   // the next frame is an IDR), then we don't need a full reorder buffer to send
   // the next frame.
-  bool flushing = !task_queue_.empty() &&
-                  (task_queue_.front().type != TASK_FRAME ||
-                   task_queue_.front().frame->is_idr);
+  bool flushing =
+      !task_queue_.empty() && (task_queue_.front().type != TASK_FRAME ||
+                               task_queue_.front().frame->is_idr);
 
   size_t reorder_window = std::max(0, reorder_queue_.top()->reorder_window);
   if (flushing || reorder_queue_.size() > reorder_window) {
@@ -1057,7 +1043,7 @@
           frame.image.get(), gfx::GenericSharedMemoryId(),
           gfx::BufferFormat::YUV_420_BIPLANAR)) {
     NOTIFY_STATUS("Failed to initialize GLImageIOSurface", PLATFORM_FAILURE,
-        SFT_PLATFORM_ERROR);
+                  SFT_PLATFORM_ERROR);
   }
 
   if (!bind_image_cb_.Run(picture_info->client_texture_id,
@@ -1078,8 +1064,7 @@
   // resolution changed. We should find the correct API to get the real
   // coded size and fix it.
   client_->PictureReady(media::Picture(picture_id, frame.bitstream_id,
-                                       gfx::Rect(frame.coded_size),
-                                       true));
+                                       gfx::Rect(frame.coded_size), true));
   return true;
 }
 
@@ -1088,14 +1073,14 @@
     VTVDASessionFailureType session_failure_type) {
   DCHECK_LT(session_failure_type, SFT_MAX + 1);
   if (!gpu_thread_checker_.CalledOnValidThread()) {
-    gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
-        &VTVideoDecodeAccelerator::NotifyError, weak_this_, vda_error_type,
-        session_failure_type));
+    gpu_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&VTVideoDecodeAccelerator::NotifyError, weak_this_,
+                   vda_error_type, session_failure_type));
   } else if (state_ == STATE_DECODING) {
     state_ = STATE_ERROR;
     UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason",
-                              session_failure_type,
-                              SFT_MAX + 1);
+                              session_failure_type, SFT_MAX + 1);
     client_->NotifyError(vda_error_type);
   }
 }
@@ -1162,4 +1147,4 @@
   return profiles;
 }
 
-}  // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vt_video_decode_accelerator_mac.h b/media/gpu/vt_video_decode_accelerator_mac.h
similarity index 91%
rename from content/common/gpu/media/vt_video_decode_accelerator_mac.h
rename to media/gpu/vt_video_decode_accelerator_mac.h
index 33440ba..77d3915 100644
--- a/content/common/gpu/media/vt_video_decode_accelerator_mac.h
+++ b/media/gpu/vt_video_decode_accelerator_mac.h
@@ -18,16 +18,16 @@
 #include "base/message_loop/message_loop.h"
 #include "base/threading/thread.h"
 #include "base/threading/thread_checker.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
-#include "content/common/gpu/media/vt_mac.h"
 #include "media/filters/h264_parser.h"
+#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
+#include "media/gpu/vt_mac.h"
 #include "media/video/h264_poc.h"
 #include "media/video/video_decode_accelerator.h"
 #include "ui/gfx/geometry/size.h"
 #include "ui/gl/gl_context_cgl.h"
 #include "ui/gl/gl_image_io_surface.h"
 
-namespace content {
+namespace media {
 
 // Preload VideoToolbox libraries, needed for sandbox warmup.
 bool InitializeVideoToolbox();
@@ -57,13 +57,12 @@
       override;
 
   // Called by OutputThunk() when VideoToolbox finishes decoding a frame.
-  void Output(
-      void* source_frame_refcon,
-      OSStatus status,
-      CVImageBufferRef image_buffer);
+  void Output(void* source_frame_refcon,
+              OSStatus status,
+              CVImageBufferRef image_buffer);
 
   static media::VideoDecodeAccelerator::SupportedProfiles
-      GetSupportedProfiles();
+  GetSupportedProfiles();
 
  private:
   // Logged to UMA, so never reuse values. Make sure to update
@@ -152,10 +151,9 @@
 
   // Compute the |pic_order_cnt| for a frame. Returns true or calls
   // NotifyError() before returning false.
-  bool ComputePicOrderCnt(
-      const media::H264SPS* sps,
-      const media::H264SliceHeader& slice_hdr,
-      Frame* frame);
+  bool ComputePicOrderCnt(const media::H264SPS* sps,
+                          const media::H264SliceHeader& slice_hdr,
+                          Frame* frame);
 
   // Set up VideoToolbox using the current SPS and PPS. Returns true or calls
   // NotifyError() before returning false.
@@ -172,9 +170,8 @@
   //
   // Methods for interacting with |client_|. Run on |gpu_task_runner_|.
   //
-  void NotifyError(
-      Error vda_error_type,
-      VTVDASessionFailureType session_failure_type);
+  void NotifyError(Error vda_error_type,
+                   VTVDASessionFailureType session_failure_type);
 
   // |type| is the type of task that the flush will complete, one of TASK_FLUSH,
   // TASK_RESET, or TASK_DESTROY.
@@ -210,15 +207,15 @@
 
   // Utility class to define the order of frames in the reorder queue.
   struct FrameOrder {
-    bool operator()(
-        const linked_ptr<Frame>& lhs,
-        const linked_ptr<Frame>& rhs) const;
+    bool operator()(const linked_ptr<Frame>& lhs,
+                    const linked_ptr<Frame>& rhs) const;
   };
 
   // Queue of decoded frames in presentation order.
   std::priority_queue<linked_ptr<Frame>,
                       std::vector<linked_ptr<Frame>>,
-                      FrameOrder> reorder_queue_;
+                      FrameOrder>
+      reorder_queue_;
 
   // Size of assigned picture buffers.
   gfx::Size picture_size_;
@@ -275,6 +272,6 @@
   DISALLOW_COPY_AND_ASSIGN(VTVideoDecodeAccelerator);
 };
 
-}  // namespace content
+}  // namespace media
 
 #endif  // CONTENT_COMMON_GPU_MEDIA_VT_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/content/common/gpu/media/vt_video_encode_accelerator_mac.cc b/media/gpu/vt_video_encode_accelerator_mac.cc
similarity index 93%
rename from content/common/gpu/media/vt_video_encode_accelerator_mac.cc
rename to media/gpu/vt_video_encode_accelerator_mac.cc
index d48b92d..f9fadaf2 100644
--- a/content/common/gpu/media/vt_video_encode_accelerator_mac.cc
+++ b/media/gpu/vt_video_encode_accelerator_mac.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
+#include "media/gpu/vt_video_encode_accelerator_mac.h"
 
 #include <memory>
 
@@ -11,7 +11,7 @@
 #include "media/base/mac/corevideo_glue.h"
 #include "media/base/mac/video_frame_mac.h"
 
-namespace content {
+namespace media {
 
 namespace {
 
@@ -30,8 +30,7 @@
 }  // namespace
 
 struct VTVideoEncodeAccelerator::InProgressFrameEncode {
-  InProgressFrameEncode(base::TimeDelta rtp_timestamp,
-                        base::TimeTicks ref_time)
+  InProgressFrameEncode(base::TimeDelta rtp_timestamp, base::TimeTicks ref_time)
       : timestamp(rtp_timestamp), reference_time(ref_time) {}
   const base::TimeDelta timestamp;
   const base::TimeTicks reference_time;
@@ -130,8 +129,7 @@
     return false;
   }
   if (media::H264PROFILE_BASELINE != output_profile) {
-    DLOG(ERROR) << "Output profile not supported= "
-                << output_profile;
+    DLOG(ERROR) << "Output profile not supported= " << output_profile;
     return false;
   }
 
@@ -228,9 +226,8 @@
 
   if (encoder_thread_.IsRunning()) {
     encoder_thread_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(&VTVideoEncodeAccelerator::DestroyTask,
-                   base::Unretained(this)));
+        FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::DestroyTask,
+                              base::Unretained(this)));
     encoder_thread_.Stop();
   } else {
     DestroyTask();
@@ -416,9 +413,8 @@
       CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(
           encode_output->sample_buffer.get(), true),
       0));
-  const bool keyframe =
-      !CFDictionaryContainsKey(sample_attachments,
-                               CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
+  const bool keyframe = !CFDictionaryContainsKey(
+      sample_attachments, CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
 
   size_t used_buffer_size = 0;
   const bool copy_rv = media::video_toolbox::CopySampleBufferToAnnexBBuffer(
@@ -439,11 +435,9 @@
 
   DestroyCompressionSession();
 
-  CFTypeRef attributes_keys[] = {
-    kCVPixelBufferOpenGLCompatibilityKey,
-    kCVPixelBufferIOSurfacePropertiesKey,
-    kCVPixelBufferPixelFormatTypeKey
-  };
+  CFTypeRef attributes_keys[] = {kCVPixelBufferOpenGLCompatibilityKey,
+                                 kCVPixelBufferIOSurfacePropertiesKey,
+                                 kCVPixelBufferPixelFormatTypeKey};
   const int format[] = {
       CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
   CFTypeRef attributes_values[] = {
@@ -480,12 +474,14 @@
   std::vector<CFTypeRef> encoder_keys;
   std::vector<CFTypeRef> encoder_values;
   if (require_hw_encoding) {
-    encoder_keys.push_back(videotoolbox_glue_
-      ->kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder());
+    encoder_keys.push_back(
+        videotoolbox_glue_
+            ->kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder());
     encoder_values.push_back(kCFBooleanTrue);
   } else {
-    encoder_keys.push_back(videotoolbox_glue_
-        ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder());
+    encoder_keys.push_back(
+        videotoolbox_glue_
+            ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder());
     encoder_values.push_back(kCFBooleanTrue);
   }
   base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec =
@@ -502,16 +498,11 @@
   // before returning to the client. Therefore, when control returns to us, we
   // are guaranteed that the output callback will not execute again.
   OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
-      kCFAllocatorDefault,
-      input_size.width(),
-      input_size.height(),
-      CoreMediaGlue::kCMVideoCodecType_H264,
-      encoder_spec,
-      attributes,
+      kCFAllocatorDefault, input_size.width(), input_size.height(),
+      CoreMediaGlue::kCMVideoCodecType_H264, encoder_spec, attributes,
       nullptr /* compressedDataAllocator */,
       &VTVideoEncodeAccelerator::CompressionCallback,
-      reinterpret_cast<void*>(this),
-      compression_session_.InitializeInto());
+      reinterpret_cast<void*>(this), compression_session_.InitializeInto());
   if (status != noErr) {
     DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
     return false;
@@ -551,4 +542,4 @@
   }
 }
 
-} // namespace content
+}  // namespace media
diff --git a/content/common/gpu/media/vt_video_encode_accelerator_mac.h b/media/gpu/vt_video_encode_accelerator_mac.h
similarity index 93%
rename from content/common/gpu/media/vt_video_encode_accelerator_mac.h
rename to media/gpu/vt_video_encode_accelerator_mac.h
index dfa180e..c12558c 100644
--- a/content/common/gpu/media/vt_video_encode_accelerator_mac.h
+++ b/media/gpu/vt_video_encode_accelerator_mac.h
@@ -2,23 +2,23 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CONTENT_COMMON_GPU_MEDIA_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
-#define CONTENT_COMMON_GPU_MEDIA_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
+#ifndef MEDIA_GPU_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
+#define MEDIA_GPU_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
 
 #include <memory>
 
 #include "base/mac/scoped_cftyperef.h"
-#include "content/common/content_export.h"
 #include "media/base/mac/videotoolbox_glue.h"
 #include "media/base/mac/videotoolbox_helpers.h"
+#include "media/gpu/media_gpu_export.h"
 #include "media/video/video_encode_accelerator.h"
 
-namespace content {
+namespace media {
 
 // VideoToolbox.framework implementation of the VideoEncodeAccelerator
 // interface for MacOSX. VideoToolbox makes no guarantees that it is thread
 // safe, so this object is pinned to the thread on which it is constructed.
-class CONTENT_EXPORT VTVideoEncodeAccelerator
+class MEDIA_GPU_EXPORT VTVideoEncodeAccelerator
     : public media::VideoEncodeAccelerator {
  public:
   VTVideoEncodeAccelerator();
@@ -140,6 +140,6 @@
   DISALLOW_COPY_AND_ASSIGN(VTVideoEncodeAccelerator);
 };
 
-} // namespace content
+}  // namespace media
 
-#endif  // CONTENT_COMMON_GPU_MEDIA_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
+#endif  // MEDIA_GPU_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
diff --git a/media/media.gyp b/media/media.gyp
index 7681aca7..9c03efa 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -1618,6 +1618,12 @@
         }],
       ],
     },
+    {
+      # GN version: //media/gpu
+      'target_name': 'media_gpu',
+      'type': 'static_library',
+      'includes': [ 'media_gpu.gypi' ],
+    },
   ],
   'conditions': [
     ['target_arch=="ia32" or target_arch=="x64"', {
@@ -2084,5 +2090,200 @@
         },
       ],
     }],
+    ['chromeos==1', {
+      'targets': [
+        {
+          'target_name': 'jpeg_decode_accelerator_unittest',
+          'type': 'executable',
+          'dependencies': [
+            '../base/base.gyp:base',
+            '../media/media.gyp:media',
+            '../media/media.gyp:media_gpu',
+            '../media/media.gyp:media_test_support',
+            '../testing/gtest.gyp:gtest',
+            '../third_party/libyuv/libyuv.gyp:libyuv',
+            '../ui/gfx/gfx.gyp:gfx',
+            '../ui/gfx/gfx.gyp:gfx_geometry',
+            '../ui/gl/gl.gyp:gl',
+            '../ui/gl/gl.gyp:gl_test_support',
+          ],
+          'sources': [
+            'gpu/jpeg_decode_accelerator_unittest.cc',
+          ],
+          'include_dirs': [
+            '<(DEPTH)/third_party/libva',
+            '<(DEPTH)/third_party/libyuv',
+          ],
+        }
+      ]
+    }],
+    ['chromeos==1 or OS=="mac"', {
+      'targets': [
+        {
+          'target_name': 'video_encode_accelerator_unittest',
+          'type': 'executable',
+          'dependencies': [
+            '../base/base.gyp:base',
+            '../media/media.gyp:media',
+            '../media/media.gyp:media_gpu',
+            '../media/media.gyp:media_test_support',
+            '../testing/gtest.gyp:gtest',
+            '../ui/base/ui_base.gyp:ui_base',
+            '../ui/gfx/gfx.gyp:gfx',
+            '../ui/gfx/gfx.gyp:gfx_geometry',
+            '../ui/gfx/gfx.gyp:gfx_test_support',
+            '../ui/gl/gl.gyp:gl',
+            '../ui/gl/gl.gyp:gl_test_support',
+          ],
+          'sources': [
+            'gpu/video_accelerator_unittest_helpers.h',
+            'gpu/video_encode_accelerator_unittest.cc',
+          ],
+          'include_dirs': [
+            '<(DEPTH)/third_party/libva',
+            '<(DEPTH)/third_party/libyuv',
+          ],
+          'conditions': [
+            ['use_x11==1', {
+              'dependencies': [
+                '../ui/gfx/x/gfx_x11.gyp:gfx_x11',
+              ],
+            }],
+            ['use_ozone==1', {
+              'dependencies': [
+                '../ui/ozone/ozone.gyp:ozone',
+              ],
+            }],
+          ],
+        }
+      ]
+    }],
+    ['chromeos==1 or OS=="win" or OS=="android"', {
+      'targets': [
+          {
+            # TODO(GYP): Port Windows and ChromeOS logic.
+            # GN: //media/gpu:video_decode_accelerator_unittest
+            'target_name': 'video_decode_accelerator_unittest',
+            'type': '<(gtest_target_type)',
+            'dependencies': [
+              '../base/base.gyp:base',
+              '../gpu/gpu.gyp:command_buffer_service',
+              '../media/gpu/ipc/media_ipc.gyp:media_gpu_ipc_service',
+              '../media/media.gyp:media',
+              '../media/media.gyp:media_gpu',
+              '../testing/gtest.gyp:gtest',
+              '../ui/base/ui_base.gyp:ui_base',
+              '../ui/gfx/gfx.gyp:gfx',
+              '../ui/gfx/gfx.gyp:gfx_geometry',
+              '../ui/gfx/gfx.gyp:gfx_test_support',
+              '../ui/gl/gl.gyp:gl',
+              '../ui/gl/gl.gyp:gl_test_support',
+            ],
+            'include_dirs': [
+              '<(DEPTH)/third_party/khronos',
+            ],
+            'sources': [
+              'gpu/android_video_decode_accelerator_unittest.cc',
+              'gpu/rendering_helper.cc',
+              'gpu/rendering_helper.h',
+              'gpu/video_accelerator_unittest_helpers.h',
+              'gpu/video_decode_accelerator_unittest.cc',
+            ],
+            'conditions': [
+              ['OS=="android"', {
+                'sources/': [
+                  ['exclude', '^gpu/rendering_helper.h'],
+                  ['exclude', '^gpu/rendering_helper.cc'],
+                  ['exclude', '^gpu/video_decode_accelerator_unittest.cc'],
+                ],
+                'dependencies': [
+                  '../media/media.gyp:player_android',
+                  '../testing/gmock.gyp:gmock',
+                  '../testing/android/native_test.gyp:native_test_native_code',
+                  '../gpu/gpu.gyp:gpu_unittest_utils',
+                ],
+              }, {  # OS!="android"
+                'sources/': [
+                  ['exclude', '^gpu/android_video_decode_accelerator_unittest.cc'],
+                ],
+              }],
+              ['OS=="win"', {
+                'dependencies': [
+                  '<(angle_path)/src/angle.gyp:libEGL',
+                  '<(angle_path)/src/angle.gyp:libGLESv2',
+                ],
+              }],
+              ['target_arch != "arm" and (OS=="linux" or chromeos == 1)', {
+                'include_dirs': [
+                  '<(DEPTH)/third_party/libva',
+                ],
+              }],
+              ['use_x11==1', {
+                'dependencies': [
+                  '../build/linux/system.gyp:x11',  # Used by rendering_helper.cc
+                  '../ui/gfx/x/gfx_x11.gyp:gfx_x11',
+                ],
+              }],
+              ['use_ozone==1 and chromeos==1', {
+                'dependencies': [
+                  '../ui/display/display.gyp:display',  # Used by rendering_helper.cc
+                  '../ui/ozone/ozone.gyp:ozone',  # Used by rendering_helper.cc
+                ],
+              }],
+            ],
+            # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+            'msvs_disabled_warnings': [ 4267 ],
+          },
+        ],
+    }],
+    ['OS=="android"', {
+      'targets': [
+        {
+          'target_name': 'video_decode_accelerator_unittest_apk',
+          'type': 'none',
+          'dependencies': [
+            'video_decode_accelerator_unittest',
+          ],
+          'variables': {
+            'test_suite_name': 'video_decode_accelerator_unittest',
+          },
+          'includes': [ '../build/apk_test.gypi' ],
+        },
+      ],
+    }],
+
+    ['chromeos==1 and target_arch != "arm"', {
+      'targets': [
+          {
+            'target_name': 'vaapi_jpeg_decoder_unittest',
+            'type': '<(gtest_target_type)',
+            'dependencies': [
+              '../media/media.gyp:media_gpu',
+              '../base/base.gyp:base',
+              '../media/media.gyp:media',
+              '../media/media.gyp:media_test_support',
+              '../testing/gtest.gyp:gtest',
+            ],
+            'sources': [
+              'gpu/vaapi_jpeg_decoder_unittest.cc',
+            ],
+            'include_dirs': [
+              '<(DEPTH)/third_party/libva',
+            ],
+            'conditions': [
+              ['use_x11==1', {
+                'dependencies': [
+                  '../build/linux/system.gyp:x11',
+                  '../ui/gfx/x/gfx_x11.gyp:gfx_x11',
+                ]
+              }, {
+                'dependencies': [
+                  '../build/linux/system.gyp:libdrm',
+                ]
+              }],
+            ],
+          }
+        ]
+    }],
   ],
 }
diff --git a/media/media_gpu.gypi b/media/media_gpu.gypi
new file mode 100644
index 0000000..df2597b
--- /dev/null
+++ b/media/media_gpu.gypi
@@ -0,0 +1,357 @@
+{
+  'variables': {
+    'use_v4lplugin%': 0,
+    'use_v4l2_codec%': 0,
+  },
+  'defines': [
+    'MEDIA_GPU_IMPLEMENTATION'
+  ],
+  'dependencies': [
+    '../base/base.gyp:base',
+    '../gpu/gpu.gyp:gpu',
+    '../media/media.gyp:media',
+    '../ui/display/display.gyp:display_types',
+    '../ui/gfx/gfx.gyp:gfx_geometry',
+    '../ui/gl/gl.gyp:gl',
+    '../ui/platform_window/platform_window.gyp:platform_window',
+  ],
+  'sources': [
+    'gpu/fake_video_decode_accelerator.cc',
+    'gpu/fake_video_decode_accelerator.h',
+    'gpu/gpu_video_accelerator_util.cc',
+    'gpu/gpu_video_accelerator_util.h',
+    'gpu/gpu_video_decode_accelerator_factory_impl.cc',
+    'gpu/gpu_video_decode_accelerator_factory_impl.h',
+    'gpu/gpu_video_decode_accelerator_helpers.h',
+    'gpu/shared_memory_region.cc',
+    'gpu/shared_memory_region.h',
+  ],
+  'include_dirs': [
+    '..',
+  ],
+  'conditions': [
+    ['OS=="mac"', {
+      'dependencies': [
+        '../media/media.gyp:media',
+        '../content/app/resources/content_resources.gyp:content_resources',
+        '../ui/accelerated_widget_mac/accelerated_widget_mac.gyp:accelerated_widget_mac'
+      ],
+      'sources': [
+        'gpu/vt_mac.h',
+        'gpu/vt_video_decode_accelerator_mac.cc',
+        'gpu/vt_video_decode_accelerator_mac.h',
+        'gpu/vt_video_encode_accelerator_mac.cc',
+        'gpu/vt_video_encode_accelerator_mac.h',
+      ],
+      'link_settings': {
+        'libraries': [
+          '$(SDKROOT)/System/Library/Frameworks/AVFoundation.framework',
+          '$(SDKROOT)/System/Library/Frameworks/CoreMedia.framework',
+          '$(SDKROOT)/System/Library/Frameworks/CoreVideo.framework',
+          '$(SDKROOT)/System/Library/Frameworks/IOSurface.framework',
+          '$(SDKROOT)/System/Library/Frameworks/QuartzCore.framework',
+          '$(SDKROOT)/usr/lib/libsandbox.dylib',
+        ],
+      },
+      'variables': {
+        'generate_stubs_script': '../tools/generate_stubs/generate_stubs.py',
+        'extra_header': 'gpu/vt_stubs_header.fragment',
+        'sig_files': ['gpu/vt.sig'],
+        'outfile_type': 'posix_stubs',
+        'stubs_filename_root': 'vt_stubs',
+        'project_path': 'media/gpu',
+        'intermediate_dir': '<(INTERMEDIATE_DIR)',
+        'output_root': '<(SHARED_INTERMEDIATE_DIR)/vt_stubs',
+      },
+      'include_dirs': [
+        '<(output_root)',
+      ],
+      'actions': [
+        {
+          'action_name': 'generate_stubs',
+          'inputs': [
+            '<(generate_stubs_script)',
+            '<(extra_header)',
+            '<@(sig_files)',
+          ],
+          'outputs': [
+            '<(intermediate_dir)/<(stubs_filename_root).cc',
+            '<(output_root)/<(project_path)/<(stubs_filename_root).h',
+          ],
+          'action': ['python',
+                     '<(generate_stubs_script)',
+                     '-i', '<(intermediate_dir)',
+                     '-o', '<(output_root)/<(project_path)',
+                     '-t', '<(outfile_type)',
+                     '-e', '<(extra_header)',
+                     '-s', '<(stubs_filename_root)',
+                     '-p', '<(project_path)',
+                     '<@(_inputs)',
+          ],
+          'process_outputs_as_sources': 1,
+          'message': 'Generating VideoToolbox stubs for dynamic loading',
+        },
+      ],
+    }],
+    ['OS=="android"', {
+      'dependencies': [
+        '../media/media.gyp:media',
+      ],
+      'sources': [
+        'gpu/android_copying_backing_strategy.cc',
+        'gpu/android_copying_backing_strategy.h',
+        'gpu/android_deferred_rendering_backing_strategy.cc',
+        'gpu/android_deferred_rendering_backing_strategy.h',
+        'gpu/android_video_decode_accelerator.cc',
+        'gpu/android_video_decode_accelerator.h',
+        'gpu/avda_codec_image.cc',
+        'gpu/avda_codec_image.h',
+        'gpu/avda_return_on_failure.h',
+        'gpu/avda_shared_state.cc',
+        'gpu/avda_shared_state.h',
+        'gpu/avda_state_provider.h',
+        'gpu/avda_surface_tracker.h',
+        'gpu/avda_surface_tracker.cc',
+      ],
+    }],
+    ['OS=="android" and enable_webrtc==1', {
+      'dependencies': [
+        '../third_party/libyuv/libyuv.gyp:libyuv',
+      ],
+      'sources': [
+        'gpu/android_video_encode_accelerator.cc',
+        'gpu/android_video_encode_accelerator.h',
+      ],
+    }],
+    ['use_v4lplugin==1 and chromeos==1', {
+      'defines': [
+        'USE_LIBV4L2'
+      ],
+      'variables': {
+        'generate_stubs_script': '../tools/generate_stubs/generate_stubs.py',
+        'extra_header': 'gpu/v4l2_stub_header.fragment',
+        'sig_files': ['gpu/v4l2.sig'],
+        'outfile_type': 'posix_stubs',
+        'stubs_filename_root': 'v4l2_stubs',
+        'project_path': 'media/gpu',
+        'intermediate_dir': '<(INTERMEDIATE_DIR)',
+        'output_root': '<(SHARED_INTERMEDIATE_DIR)/v4l2',
+      },
+      'include_dirs': [
+        '<(output_root)',
+      ],
+      'actions': [
+        {
+          'action_name': 'generate_stubs',
+          'inputs': [
+            '<(generate_stubs_script)',
+            '<(extra_header)',
+            '<@(sig_files)',
+          ],
+          'outputs': [
+            '<(intermediate_dir)/<(stubs_filename_root).cc',
+            '<(output_root)/<(project_path)/<(stubs_filename_root).h',
+          ],
+          'action': ['python',
+            '<(generate_stubs_script)',
+            '-i', '<(intermediate_dir)',
+            '-o', '<(output_root)/<(project_path)',
+            '-t', '<(outfile_type)',
+            '-e', '<(extra_header)',
+            '-s', '<(stubs_filename_root)',
+            '-p', '<(project_path)',
+            '<@(_inputs)',
+          ],
+          'process_outputs_as_sources': 1,
+          'message': 'Generating libv4l2 stubs for dynamic loading',
+        },
+      ],
+    }],
+    ['chromeos==1', {
+      'sources': [
+        'gpu/accelerated_video_decoder.h',
+        'gpu/h264_decoder.cc',
+        'gpu/h264_decoder.h',
+        'gpu/h264_dpb.cc',
+        'gpu/h264_dpb.h',
+        'gpu/vp8_decoder.cc',
+        'gpu/vp8_decoder.h',
+        'gpu/vp8_picture.cc',
+        'gpu/vp8_picture.h',
+        'gpu/vp9_decoder.cc',
+        'gpu/vp9_decoder.h',
+        'gpu/vp9_picture.cc',
+        'gpu/vp9_picture.h',
+      ],
+    }],
+    ['chromeos==1 and use_v4l2_codec==1', {
+      'direct_dependent_settings': {
+        'defines': [
+          'USE_V4L2_CODEC'
+        ],
+      },
+      'defines': [
+        'USE_V4L2_CODEC'
+      ],
+      'dependencies': [
+        '../media/media.gyp:media',
+        '../third_party/libyuv/libyuv.gyp:libyuv',
+      ],
+      'sources': [
+        'gpu/generic_v4l2_device.cc',
+        'gpu/generic_v4l2_device.h',
+        'gpu/v4l2_device.cc',
+        'gpu/v4l2_device.h',
+        'gpu/v4l2_image_processor.cc',
+        'gpu/v4l2_image_processor.h',
+        'gpu/v4l2_jpeg_decode_accelerator.cc',
+        'gpu/v4l2_jpeg_decode_accelerator.h',
+        'gpu/v4l2_slice_video_decode_accelerator.cc',
+        'gpu/v4l2_slice_video_decode_accelerator.h',
+        'gpu/v4l2_video_decode_accelerator.cc',
+        'gpu/v4l2_video_decode_accelerator.h',
+        'gpu/v4l2_video_encode_accelerator.cc',
+        'gpu/v4l2_video_encode_accelerator.h',
+      ],
+      'include_dirs': [
+        '<(DEPTH)/third_party/khronos',
+      ],
+    }],
+    ['target_arch == "arm" and chromeos == 1', {
+      'sources': [
+        'gpu/tegra_v4l2_device.cc',
+        'gpu/tegra_v4l2_device.h',
+      ],
+    }],
+    ['target_arch != "arm" and chromeos == 1', {
+      'dependencies': [
+        '../media/media.gyp:media',
+        '../third_party/libyuv/libyuv.gyp:libyuv',
+      ],
+      'sources': [
+        'gpu/va_surface.h',
+        'gpu/vaapi_jpeg_decode_accelerator.cc',
+        'gpu/vaapi_jpeg_decode_accelerator.h',
+        'gpu/vaapi_jpeg_decoder.cc',
+        'gpu/vaapi_jpeg_decoder.h',
+        'gpu/vaapi_picture.cc',
+        'gpu/vaapi_picture.h',
+        'gpu/vaapi_video_decode_accelerator.cc',
+        'gpu/vaapi_video_decode_accelerator.h',
+        'gpu/vaapi_video_encode_accelerator.cc',
+        'gpu/vaapi_video_encode_accelerator.h',
+        'gpu/vaapi_wrapper.cc',
+        'gpu/vaapi_wrapper.h',
+      ],
+      'conditions': [
+        ['use_x11 == 1', {
+          'variables': {
+            'sig_files': [
+              'gpu/va.sigs',
+              'gpu/va_x11.sigs',
+            ],
+          },
+          'sources': [
+            'gpu/vaapi_tfp_picture.cc',
+            'gpu/vaapi_tfp_picture.h',
+          ],
+        }, {
+          'variables': {
+            'sig_files': [
+              'gpu/va.sigs',
+              'gpu/va_drm.sigs',
+            ],
+          },
+          'sources': [
+            'gpu/vaapi_drm_picture.cc',
+            'gpu/vaapi_drm_picture.h',
+          ],
+        }],
+      ],
+      'variables': {
+        'generate_stubs_script': '../tools/generate_stubs/generate_stubs.py',
+        'extra_header': 'gpu/va_stub_header.fragment',
+        'outfile_type': 'posix_stubs',
+        'stubs_filename_root': 'va_stubs',
+        'project_path': 'media/gpu',
+        'intermediate_dir': '<(INTERMEDIATE_DIR)',
+        'output_root': '<(SHARED_INTERMEDIATE_DIR)/va',
+      },
+      'include_dirs': [
+        '<(DEPTH)/third_party/libva',
+        '<(DEPTH)/third_party/libyuv',
+        '<(output_root)',
+      ],
+      'actions': [
+        {
+          'action_name': 'generate_stubs',
+          'inputs': [
+            '<(generate_stubs_script)',
+            '<(extra_header)',
+            '<@(sig_files)',
+          ],
+          'outputs': [
+            '<(intermediate_dir)/<(stubs_filename_root).cc',
+            '<(output_root)/<(project_path)/<(stubs_filename_root).h',
+          ],
+          'action': ['python',
+                     '<(generate_stubs_script)',
+                     '-i', '<(intermediate_dir)',
+                     '-o', '<(output_root)/<(project_path)',
+                     '-t', '<(outfile_type)',
+                     '-e', '<(extra_header)',
+                     '-s', '<(stubs_filename_root)',
+                     '-p', '<(project_path)',
+                     '<@(_inputs)',
+          ],
+          'process_outputs_as_sources': 1,
+          'message': 'Generating libva stubs for dynamic loading',
+        },
+     ]
+    }],
+    ['OS=="win"', {
+      'dependencies': [
+        '../media/media.gyp:media',
+        '../ui/gl/gl.gyp:gl',
+      ],
+      'link_settings': {
+        'libraries': [
+           '-ld3d9.lib',
+           '-ld3d11.lib',
+           '-ldxva2.lib',
+           '-lstrmiids.lib',
+           '-lmf.lib',
+           '-lmfplat.lib',
+           '-lmfuuid.lib',
+        ],
+        'msvs_settings': {
+          'VCLinkerTool': {
+            'DelayLoadDLLs': [
+              'd3d9.dll',
+              'd3d11.dll',
+              'dxva2.dll',
+              'mf.dll',
+              'mfplat.dll',
+            ],
+          },
+        },
+      },
+      'sources': [
+        'gpu/dxva_video_decode_accelerator_win.cc',
+        'gpu/dxva_video_decode_accelerator_win.h',
+      ],
+      'include_dirs': [
+        '<(DEPTH)/third_party/khronos',
+      ],
+    }],
+    ['OS == "win" and target_arch == "x64"', {
+      'msvs_settings': {
+        'VCCLCompilerTool': {
+          'AdditionalOptions': [
+            '/wd4267', # Conversion from 'size_t' to 'type', possible loss of data
+          ],
+        },
+      },
+    }],
+  ],
+}
diff --git a/ui/gl/gl_bindings.h b/ui/gl/gl_bindings.h
index c973bed..d45b743a 100644
--- a/ui/gl/gl_bindings.h
+++ b/ui/gl/gl_bindings.h
@@ -10,6 +10,14 @@
 // the X11 headers on linux, which define all kinds of macros that are
 // liable to cause conflicts.
 
+// GL headers may include inttypes.h and so we need to ensure that
+// __STDC_FORMAT_MACROS is defined in order for //base/format_macros.h to
+// function correctly. See comment and #error message in //base/format_macros.h
+// for details.
+#if defined(OS_POSIX) && !defined(__STDC_FORMAT_MACROS)
+#define __STDC_FORMAT_MACROS
+#endif
+
 #include <GL/gl.h>
 #include <GL/glext.h>
 #include <EGL/egl.h>