media: Drop "media::" in media/gpu

Now that media/gpu has been moved into the media namespace, there's no
need to use the media:: prefix anymore.
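
The change is purely mechanical: code that already lives inside
namespace media no longer spells out the prefix, since unqualified
lookup finds the same entity. A minimal sketch (GetId and the
simplified PictureBuffer below are illustration-only stand-ins, not
the real class):

  namespace media {

  // Hypothetical, simplified stand-in for the real PictureBuffer.
  struct PictureBuffer {
    int id = 0;
  };

  // Before: int GetId(const media::PictureBuffer& picture_buffer);
  // After: the unqualified name denotes the same type.
  int GetId(const PictureBuffer& picture_buffer) {
    return picture_buffer.id;
  }

  }  // namespace media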

BUG=586386

Review-Url: https://codereview.chromium.org/2061823003
Cr-Commit-Position: refs/heads/master@{#399790}
diff --git a/media/gpu/android_copying_backing_strategy.cc b/media/gpu/android_copying_backing_strategy.cc
index 9fd7034eb..3435fb7 100644
--- a/media/gpu/android_copying_backing_strategy.cc
+++ b/media/gpu/android_copying_backing_strategy.cc
@@ -28,7 +28,7 @@
 
 gl::ScopedJavaSurface AndroidCopyingBackingStrategy::Initialize(
     int surface_view_id) {
-  if (surface_view_id != media::VideoDecodeAccelerator::Config::kNoSurfaceID) {
+  if (surface_view_id != VideoDecodeAccelerator::Config::kNoSurfaceID) {
     LOG(ERROR) << "The copying strategy should not be initialized with a "
                   "surface id.";
     return gl::ScopedJavaSurface();
@@ -66,7 +66,7 @@
 
 void AndroidCopyingBackingStrategy::UseCodecBufferForPictureBuffer(
     int32_t codec_buf_index,
-    const media::PictureBuffer& picture_buffer) {
+    const PictureBuffer& picture_buffer) {
   // Make sure that the decoder is available.
   RETURN_ON_FAILURE(state_provider_, state_provider_->GetGlDecoder().get(),
                     "Failed to get gles2 decoder instance.", ILLEGAL_STATE);
@@ -130,8 +130,7 @@
       true, false, false, transform_matrix);
 }
 
-void AndroidCopyingBackingStrategy::CodecChanged(
-    media::VideoCodecBridge* codec) {
+void AndroidCopyingBackingStrategy::CodecChanged(VideoCodecBridge* codec) {
   media_codec_ = codec;
 }
 
@@ -147,7 +146,7 @@
 }
 
 void AndroidCopyingBackingStrategy::UpdatePictureBufferSize(
-    media::PictureBuffer* picture_buffer,
+    PictureBuffer* picture_buffer,
     const gfx::Size& new_size) {
   // This strategy uses 2D textures whose allocated memory is dependent on the
   // size. To update size in all places, we must:
diff --git a/media/gpu/android_copying_backing_strategy.h b/media/gpu/android_copying_backing_strategy.h
index 35df4e0..5348a98 100644
--- a/media/gpu/android_copying_backing_strategy.h
+++ b/media/gpu/android_copying_backing_strategy.h
@@ -43,11 +43,11 @@
   gfx::Size GetPictureBufferSize() const override;
   void UseCodecBufferForPictureBuffer(
       int32_t codec_buffer_index,
-      const media::PictureBuffer& picture_buffer) override;
-  void CodecChanged(media::VideoCodecBridge* codec) override;
+      const PictureBuffer& picture_buffer) override;
+  void CodecChanged(VideoCodecBridge* codec) override;
   void OnFrameAvailable() override;
   bool ArePicturesOverlayable() override;
-  void UpdatePictureBufferSize(media::PictureBuffer* picture_buffer,
+  void UpdatePictureBufferSize(PictureBuffer* picture_buffer,
                                const gfx::Size& new_size) override;
 
  private:
@@ -62,7 +62,7 @@
   // The texture id which is set to |surface_texture_|.
   uint32_t surface_texture_id_;
 
-  media::VideoCodecBridge* media_codec_;
+  VideoCodecBridge* media_codec_;
 };
 
 }  // namespace media
diff --git a/media/gpu/android_deferred_rendering_backing_strategy.cc b/media/gpu/android_deferred_rendering_backing_strategy.cc
index 69ff60dc..cd3ed0e 100644
--- a/media/gpu/android_deferred_rendering_backing_strategy.cc
+++ b/media/gpu/android_deferred_rendering_backing_strategy.cc
@@ -49,7 +49,7 @@
   UMA_HISTOGRAM_BOOLEAN("Media.AVDA.VirtualContext", using_virtual_context);
 
   // Acquire the SurfaceView surface if given a valid id.
-  if (surface_view_id != media::VideoDecodeAccelerator::Config::kNoSurfaceID) {
+  if (surface_view_id != VideoDecodeAccelerator::Config::kNoSurfaceID) {
     return gpu::GpuSurfaceLookup::GetInstance()->AcquireJavaSurface(
         surface_view_id);
   }
@@ -97,7 +97,7 @@
 }
 
 void AndroidDeferredRenderingBackingStrategy::SetImageForPicture(
-    const media::PictureBuffer& picture_buffer,
+    const PictureBuffer& picture_buffer,
     const scoped_refptr<gpu::gles2::GLStreamTextureImage>& image) {
   gpu::gles2::TextureRef* texture_ref =
       state_provider_->GetTextureForPicture(picture_buffer);
@@ -145,7 +145,7 @@
 
 void AndroidDeferredRenderingBackingStrategy::UseCodecBufferForPictureBuffer(
     int32_t codec_buf_index,
-    const media::PictureBuffer& picture_buffer) {
+    const PictureBuffer& picture_buffer) {
   // Make sure that the decoder is available.
   RETURN_IF_NULL(state_provider_->GetGlDecoder());
 
@@ -165,7 +165,7 @@
 }
 
 void AndroidDeferredRenderingBackingStrategy::AssignOnePictureBuffer(
-    const media::PictureBuffer& picture_buffer,
+    const PictureBuffer& picture_buffer,
     bool have_context) {
   // Attach a GLImage to each texture that will use the surface texture.
   // We use a refptr here in case SetImageForPicture fails.
@@ -192,7 +192,7 @@
 }
 
 void AndroidDeferredRenderingBackingStrategy::ReleaseCodecBufferForPicture(
-    const media::PictureBuffer& picture_buffer) {
+    const PictureBuffer& picture_buffer) {
   AVDACodecImage* avda_image =
       shared_state_->GetImageForPicture(picture_buffer.id());
   RETURN_IF_NULL(avda_image);
@@ -200,7 +200,7 @@
 }
 
 void AndroidDeferredRenderingBackingStrategy::ReuseOnePictureBuffer(
-    const media::PictureBuffer& picture_buffer) {
+    const PictureBuffer& picture_buffer) {
   pictures_out_for_display_.erase(
       std::remove(pictures_out_for_display_.begin(),
                   pictures_out_for_display_.end(), picture_buffer.id()),
@@ -216,7 +216,7 @@
 
 void AndroidDeferredRenderingBackingStrategy::ReleaseCodecBuffers(
     const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
-  for (const std::pair<int, media::PictureBuffer>& entry : buffers)
+  for (const std::pair<int, PictureBuffer>& entry : buffers)
     ReleaseCodecBufferForPicture(entry.second);
 }
 
@@ -276,7 +276,7 @@
 }
 
 void AndroidDeferredRenderingBackingStrategy::CodecChanged(
-    media::VideoCodecBridge* codec) {
+    VideoCodecBridge* codec) {
   media_codec_ = codec;
   shared_state_->CodecChanged(codec);
 }
@@ -292,7 +292,7 @@
 }
 
 void AndroidDeferredRenderingBackingStrategy::UpdatePictureBufferSize(
-    media::PictureBuffer* picture_buffer,
+    PictureBuffer* picture_buffer,
     const gfx::Size& new_size) {
   // This strategy uses EGL images which manage the texture size for us.  We
   // simply update the PictureBuffer meta-data and leave the texture as-is.
diff --git a/media/gpu/android_deferred_rendering_backing_strategy.h b/media/gpu/android_deferred_rendering_backing_strategy.h
index e452c90..784e6f1 100644
--- a/media/gpu/android_deferred_rendering_backing_strategy.h
+++ b/media/gpu/android_deferred_rendering_backing_strategy.h
@@ -49,29 +49,28 @@
   gfx::Size GetPictureBufferSize() const override;
   void UseCodecBufferForPictureBuffer(
       int32_t codec_buffer_index,
-      const media::PictureBuffer& picture_buffer) override;
-  void AssignOnePictureBuffer(const media::PictureBuffer&, bool) override;
-  void ReuseOnePictureBuffer(
-      const media::PictureBuffer& picture_buffer) override;
+      const PictureBuffer& picture_buffer) override;
+  void AssignOnePictureBuffer(const PictureBuffer&, bool) override;
+  void ReuseOnePictureBuffer(const PictureBuffer& picture_buffer) override;
   void MaybeRenderEarly() override;
-  void CodecChanged(media::VideoCodecBridge* codec) override;
+  void CodecChanged(VideoCodecBridge* codec) override;
   void ReleaseCodecBuffers(
       const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) override;
   void OnFrameAvailable() override;
   bool ArePicturesOverlayable() override;
-  void UpdatePictureBufferSize(media::PictureBuffer* picture_buffer,
+  void UpdatePictureBufferSize(PictureBuffer* picture_buffer,
                                const gfx::Size& new_size) override;
 
  private:
   // Release any codec buffer that is associated with the given picture buffer
   // back to the codec.  It is okay if there is no such buffer.
-  void ReleaseCodecBufferForPicture(const media::PictureBuffer& picture_buffer);
+  void ReleaseCodecBufferForPicture(const PictureBuffer& picture_buffer);
 
   // Sets up the texture references (as found by |picture_buffer|), for the
   // specified |image|. If |image| is null, clears any ref on the texture
   // associated with |picture_buffer|.
   void SetImageForPicture(
-      const media::PictureBuffer& picture_buffer,
+      const PictureBuffer& picture_buffer,
       const scoped_refptr<gpu::gles2::GLStreamTextureImage>& image);
 
   // Make a copy of the SurfaceTexture's front buffer and associate all given
@@ -93,7 +92,7 @@
   // we're not rendering to a SurfaceView.
   scoped_refptr<gl::SurfaceTexture> surface_texture_;
 
-  media::VideoCodecBridge* media_codec_;
+  VideoCodecBridge* media_codec_;
 
   // Picture buffer IDs that are out for display. Stored in order of frames as
   // they are returned from the decoder.
diff --git a/media/gpu/android_video_decode_accelerator.cc b/media/gpu/android_video_decode_accelerator.cc
index e14c803e..b821aef3 100644
--- a/media/gpu/android_video_decode_accelerator.cc
+++ b/media/gpu/android_video_decode_accelerator.cc
@@ -45,15 +45,15 @@
 #include "media/mojo/services/mojo_cdm_service.h"
 #endif
 
-#define POST_ERROR(error_code, error_message)                        \
-  do {                                                               \
-    DLOG(ERROR) << error_message;                                    \
-    PostError(FROM_HERE, media::VideoDecodeAccelerator::error_code); \
+#define POST_ERROR(error_code, error_message)                 \
+  do {                                                        \
+    DLOG(ERROR) << error_message;                             \
+    PostError(FROM_HERE, VideoDecodeAccelerator::error_code); \
   } while (0)
 
 namespace media {
 
-enum { kNumPictureBuffers = media::limits::kMaxVideoFrames + 1 };
+enum { kNumPictureBuffers = limits::kMaxVideoFrames + 1 };
 
 // Max number of bitstreams notified to the client with
 // NotifyEndOfBitstreamBuffer() before getting output from the bitstream.
@@ -63,18 +63,18 @@
 // support others. Advertise support for all H264 profiles and let the
 // MediaCodec fail when decoding if it's not actually supported. It's assumed
 // that consumers won't have software fallback for H264 on Android anyway.
-static const media::VideoCodecProfile kSupportedH264Profiles[] = {
-    media::H264PROFILE_BASELINE,
-    media::H264PROFILE_MAIN,
-    media::H264PROFILE_EXTENDED,
-    media::H264PROFILE_HIGH,
-    media::H264PROFILE_HIGH10PROFILE,
-    media::H264PROFILE_HIGH422PROFILE,
-    media::H264PROFILE_HIGH444PREDICTIVEPROFILE,
-    media::H264PROFILE_SCALABLEBASELINE,
-    media::H264PROFILE_SCALABLEHIGH,
-    media::H264PROFILE_STEREOHIGH,
-    media::H264PROFILE_MULTIVIEWHIGH};
+static const VideoCodecProfile kSupportedH264Profiles[] = {
+    H264PROFILE_BASELINE,
+    H264PROFILE_MAIN,
+    H264PROFILE_EXTENDED,
+    H264PROFILE_HIGH,
+    H264PROFILE_HIGH10PROFILE,
+    H264PROFILE_HIGH422PROFILE,
+    H264PROFILE_HIGH444PREDICTIVEPROFILE,
+    H264PROFILE_SCALABLEBASELINE,
+    H264PROFILE_SCALABLEHIGH,
+    H264PROFILE_STEREOHIGH,
+    H264PROFILE_MULTIVIEWHIGH};
 
 // Because MediaCodec is thread-hostile (must be poked on a single thread) and
 // has no callback mechanism (b/11990118), we must drive it by polling for
@@ -369,7 +369,7 @@
 AndroidVideoDecodeAccelerator::CodecConfig::~CodecConfig() {}
 
 AndroidVideoDecodeAccelerator::BitstreamRecord::BitstreamRecord(
-    const media::BitstreamBuffer& bitstream_buffer)
+    const BitstreamBuffer& bitstream_buffer)
     : buffer(bitstream_buffer) {
   if (buffer.id() != -1)
     memory.reset(new SharedMemoryRegion(buffer, true));
@@ -452,22 +452,22 @@
     return false;
   }
 
-  if (codec_config_->codec_ != media::kCodecVP8 &&
-      codec_config_->codec_ != media::kCodecVP9 &&
-      codec_config_->codec_ != media::kCodecH264) {
+  if (codec_config_->codec_ != kCodecVP8 &&
+      codec_config_->codec_ != kCodecVP9 &&
+      codec_config_->codec_ != kCodecH264) {
     LOG(ERROR) << "Unsupported profile: " << config.profile;
     return false;
   }
 
   // Only use MediaCodec for VP8/9 if it's likely backed by hardware
   // or if the stream is encrypted.
-  if ((codec_config_->codec_ == media::kCodecVP8 ||
-       codec_config_->codec_ == media::kCodecVP9) &&
+  if ((codec_config_->codec_ == kCodecVP8 ||
+       codec_config_->codec_ == kCodecVP9) &&
       !config_.is_encrypted &&
-      media::VideoCodecBridge::IsKnownUnaccelerated(
-          codec_config_->codec_, media::MEDIA_CODEC_DECODER)) {
+      VideoCodecBridge::IsKnownUnaccelerated(codec_config_->codec_,
+                                             MEDIA_CODEC_DECODER)) {
     DVLOG(1) << "Initialization failed: "
-             << (codec_config_->codec_ == media::kCodecVP8 ? "vp8" : "vp9")
+             << (codec_config_->codec_ == kCodecVP8 ? "vp8" : "vp9")
              << " is not hardware accelerated";
     return false;
   }
@@ -604,15 +604,15 @@
   // That status does not return this buffer back to the pool of
   // available input buffers. We have to reuse it in QueueSecureInputBuffer().
   if (input_buf_index == -1) {
-    media::MediaCodecStatus status =
+    MediaCodecStatus status =
         media_codec_->DequeueInputBuffer(NoWaitTimeOut(), &input_buf_index);
     switch (status) {
-      case media::MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER:
+      case MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER:
         return false;
-      case media::MEDIA_CODEC_ERROR:
+      case MEDIA_CODEC_ERROR:
         POST_ERROR(PLATFORM_FAILURE, "Failed to DequeueInputBuffer");
         return false;
-      case media::MEDIA_CODEC_OK:
+      case MEDIA_CODEC_OK:
         break;
       default:
         NOTREACHED() << "Unknown DequeueInputBuffer status " << status;
@@ -622,8 +622,7 @@
 
   DCHECK_NE(input_buf_index, -1);
 
-  media::BitstreamBuffer bitstream_buffer =
-      pending_bitstream_records_.front().buffer;
+  BitstreamBuffer bitstream_buffer = pending_bitstream_records_.front().buffer;
 
   if (bitstream_buffer.id() == -1) {
     pending_bitstream_records_.pop();
@@ -650,7 +649,7 @@
 
   const base::TimeDelta presentation_timestamp =
       bitstream_buffer.presentation_timestamp();
-  DCHECK(presentation_timestamp != media::kNoTimestamp())
+  DCHECK(presentation_timestamp != kNoTimestamp())
       << "Bitstream buffers must have valid presentation timestamps";
 
   // There may already be a bitstream buffer with this timestamp, e.g., VP9 alt
@@ -667,10 +666,9 @@
       shm ? static_cast<const uint8_t*>(shm->memory()) : nullptr;
   const std::string& key_id = bitstream_buffer.key_id();
   const std::string& iv = bitstream_buffer.iv();
-  const std::vector<media::SubsampleEntry>& subsamples =
-      bitstream_buffer.subsamples();
+  const std::vector<SubsampleEntry>& subsamples = bitstream_buffer.subsamples();
 
-  media::MediaCodecStatus status;
+  MediaCodecStatus status;
   if (key_id.empty() || iv.empty()) {
     status = media_codec_->QueueInputBuffer(input_buf_index, memory,
                                             bitstream_buffer.size(),
@@ -685,7 +683,7 @@
            << ": Queue(Secure)InputBuffer: pts:" << presentation_timestamp
            << " status:" << status;
 
-  if (status == media::MEDIA_CODEC_NO_KEY) {
+  if (status == MEDIA_CODEC_NO_KEY) {
     // Keep trying to enqueue the same input buffer.
     // The buffer is owned by us (not the MediaCodec) and is filled with data.
     DVLOG(1) << "QueueSecureInputBuffer failed: NO_KEY";
@@ -711,7 +709,7 @@
                  weak_this_factory_.GetWeakPtr(), bitstream_buffer.id()));
   bitstreams_notified_in_advance_.push_back(bitstream_buffer.id());
 
-  if (status != media::MEDIA_CODEC_OK) {
+  if (status != MEDIA_CODEC_OK) {
     POST_ERROR(PLATFORM_FAILURE, "Failed to QueueInputBuffer: " << status);
     return false;
   }
@@ -740,7 +738,7 @@
     size_t size = 0;
 
     TRACE_EVENT_BEGIN0("media", "AVDA::DequeueOutput");
-    media::MediaCodecStatus status = media_codec_->DequeueOutputBuffer(
+    MediaCodecStatus status = media_codec_->DequeueOutputBuffer(
         NoWaitTimeOut(), &buf_index, &offset, &size, &presentation_timestamp,
         &eos, NULL);
     TRACE_EVENT_END2("media", "AVDA::DequeueOutput", "status", status,
@@ -748,7 +746,7 @@
                      presentation_timestamp.InMilliseconds());
 
     switch (status) {
-      case media::MEDIA_CODEC_ERROR:
+      case MEDIA_CODEC_ERROR:
         // Do not post an error if we are draining for reset and destroy.
         // Instead, run the drain completion task.
         if (IsDrainingForResetOrDestroy()) {
@@ -760,17 +758,17 @@
         }
         return false;
 
-      case media::MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
+      case MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
         return false;
 
-      case media::MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: {
+      case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: {
         // An OUTPUT_FORMAT_CHANGED is not reported after flush() if the frame
         // size does not change. Therefore we have to keep track of the format
         // even if draining, unless we are draining for destroy.
         if (drain_type_ == DRAIN_FOR_DESTROY)
           return true;  // ignore
 
-        if (media_codec_->GetOutputSize(&size_) != media::MEDIA_CODEC_OK) {
+        if (media_codec_->GetOutputSize(&size_) != MEDIA_CODEC_OK) {
           POST_ERROR(PLATFORM_FAILURE, "GetOutputSize failed.");
           return false;
         }
@@ -797,10 +795,10 @@
         return true;
       }
 
-      case media::MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
+      case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
         break;
 
-      case media::MEDIA_CODEC_OK:
+      case MEDIA_CODEC_OK:
         DCHECK_GE(buf_index, 0);
         DVLOG(3) << __FUNCTION__ << ": pts:" << presentation_timestamp
                  << " buf_index:" << buf_index << " offset:" << offset
@@ -904,8 +902,8 @@
   }
 
   const bool allow_overlay = strategy_->ArePicturesOverlayable();
-  media::Picture picture(picture_buffer_id, bitstream_id, gfx::Rect(size_),
-                         allow_overlay);
+  Picture picture(picture_buffer_id, bitstream_id, gfx::Rect(size_),
+                  allow_overlay);
   picture.set_size_changed(size_changed);
 
   // Notify picture ready before calling UseCodecBufferForPictureBuffer() since
@@ -920,7 +918,7 @@
 }
 
 void AndroidVideoDecodeAccelerator::Decode(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   DCHECK(thread_checker_.CalledOnValidThread());
 
   // If we previously deferred a codec restart, take care of it now. This can
@@ -950,7 +948,7 @@
 }
 
 void AndroidVideoDecodeAccelerator::DecodeBuffer(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   pending_bitstream_records_.push(BitstreamRecord(bitstream_buffer));
   TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
                  pending_bitstream_records_.size());
@@ -967,7 +965,7 @@
 }
 
 void AndroidVideoDecodeAccelerator::AssignPictureBuffers(
-    const std::vector<media::PictureBuffer>& buffers) {
+    const std::vector<PictureBuffer>& buffers) {
   DCHECK(thread_checker_.CalledOnValidThread());
   DCHECK(output_picture_buffers_.empty());
   DCHECK(free_picture_ids_.empty());
@@ -1063,13 +1061,13 @@
 
 bool AndroidVideoDecodeAccelerator::ConfigureMediaCodecSynchronously() {
   state_ = WAITING_FOR_CODEC;
-  std::unique_ptr<media::VideoCodecBridge> media_codec =
+  std::unique_ptr<VideoCodecBridge> media_codec =
       ConfigureMediaCodecOnAnyThread(codec_config_);
   OnCodecConfigured(std::move(media_codec));
   return !!media_codec_;
 }
 
-std::unique_ptr<media::VideoCodecBridge>
+std::unique_ptr<VideoCodecBridge>
 AndroidVideoDecodeAccelerator::ConfigureMediaCodecOnAnyThread(
     scoped_refptr<CodecConfig> codec_config) {
   TRACE_EVENT0("media", "AVDA::ConfigureMediaCodec");
@@ -1081,15 +1079,14 @@
   // |needs_protected_surface_| implies encrypted stream.
   DCHECK(!codec_config->needs_protected_surface_ || media_crypto);
 
-  return std::unique_ptr<media::VideoCodecBridge>(
-      media::VideoCodecBridge::CreateDecoder(
-          codec_config->codec_, codec_config->needs_protected_surface_,
-          codec_config->initial_expected_coded_size_,
-          codec_config->surface_.j_surface().obj(), media_crypto, true));
+  return std::unique_ptr<VideoCodecBridge>(VideoCodecBridge::CreateDecoder(
+      codec_config->codec_, codec_config->needs_protected_surface_,
+      codec_config->initial_expected_coded_size_,
+      codec_config->surface_.j_surface().obj(), media_crypto, true));
 }
 
 void AndroidVideoDecodeAccelerator::OnCodecConfigured(
-    std::unique_ptr<media::VideoCodecBridge> media_codec) {
+    std::unique_ptr<VideoCodecBridge> media_codec) {
   DCHECK(thread_checker_.CalledOnValidThread());
   DCHECK(state_ == WAITING_FOR_CODEC || state_ == SURFACE_DESTROYED);
 
@@ -1138,7 +1135,7 @@
   drain_type_ = drain_type;
 
   if (enqueue_eos)
-    DecodeBuffer(media::BitstreamBuffer(-1, base::SharedMemoryHandle(), 0));
+    DecodeBuffer(BitstreamBuffer(-1, base::SharedMemoryHandle(), 0));
 }
 
 bool AndroidVideoDecodeAccelerator::IsDrainingForResetOrDestroy() const {
@@ -1271,7 +1268,7 @@
 
   // Some VP8 files require complete MediaCodec drain before we can call
   // MediaCodec.flush() or MediaCodec.reset(). http://crbug.com/598963.
-  if (media_codec_ && codec_config_->codec_ == media::kCodecVP8) {
+  if (media_codec_ && codec_config_->codec_ == kCodecVP8) {
     // Postpone ResetCodecState() after the drain.
     StartCodecDrain(DRAIN_FOR_RESET);
   } else {
@@ -1303,7 +1300,7 @@
 
   // Some VP8 files require complete MediaCodec drain before we can call
   // MediaCodec.flush() or MediaCodec.reset(). http://crbug.com/598963.
-  if (media_codec_ && codec_config_->codec_ == media::kCodecVP8) {
+  if (media_codec_ && codec_config_->codec_ == kCodecVP8) {
     // Clear pending_bitstream_records_.
     while (!pending_bitstream_records_.empty())
       pending_bitstream_records_.pop();
@@ -1360,7 +1357,7 @@
 }
 
 gpu::gles2::TextureRef* AndroidVideoDecodeAccelerator::GetTextureForPicture(
-    const media::PictureBuffer& picture_buffer) {
+    const PictureBuffer& picture_buffer) {
   auto gles_decoder = GetGlDecoder();
   RETURN_ON_FAILURE(this, gles_decoder, "Failed to get GL decoder",
                     ILLEGAL_STATE, nullptr);
@@ -1439,7 +1436,7 @@
 
 void AndroidVideoDecodeAccelerator::PostError(
     const ::tracked_objects::Location& from_here,
-    media::VideoDecodeAccelerator::Error error) {
+    VideoDecodeAccelerator::Error error) {
   base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
       from_here,
       base::Bind(&AndroidVideoDecodeAccelerator::NotifyError,
@@ -1457,11 +1454,11 @@
 #else
   // Store the CDM to hold a reference to it.
   cdm_for_reference_holding_only_ =
-      media::MojoCdmService::LegacyGetCdm(config_.cdm_id);
+      MojoCdmService::LegacyGetCdm(config_.cdm_id);
   DCHECK(cdm_for_reference_holding_only_);
 
   // On Android platform the CdmContext must be a MediaDrmBridgeCdmContext.
-  media_drm_bridge_cdm_context_ = static_cast<media::MediaDrmBridgeCdmContext*>(
+  media_drm_bridge_cdm_context_ = static_cast<MediaDrmBridgeCdmContext*>(
       cdm_for_reference_holding_only_->GetCdmContext());
   DCHECK(media_drm_bridge_cdm_context_);
 
@@ -1474,20 +1471,19 @@
   // called.
   // TODO(xhwang): Remove |cdm_unset_cb| after it's not used on all platforms.
   cdm_registration_id_ = media_drm_bridge_cdm_context_->RegisterPlayer(
-      media::BindToCurrentLoop(
-          base::Bind(&AndroidVideoDecodeAccelerator::OnKeyAdded,
-                     weak_this_factory_.GetWeakPtr())),
+      BindToCurrentLoop(base::Bind(&AndroidVideoDecodeAccelerator::OnKeyAdded,
+                                   weak_this_factory_.GetWeakPtr())),
       base::Bind(&base::DoNothing));
 
   // Deferred initialization will continue in OnMediaCryptoReady().
-  media_drm_bridge_cdm_context_->SetMediaCryptoReadyCB(media::BindToCurrentLoop(
+  media_drm_bridge_cdm_context_->SetMediaCryptoReadyCB(BindToCurrentLoop(
       base::Bind(&AndroidVideoDecodeAccelerator::OnMediaCryptoReady,
                  weak_this_factory_.GetWeakPtr())));
 #endif  // !defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
 }
 
 void AndroidVideoDecodeAccelerator::OnMediaCryptoReady(
-    media::MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto,
+    MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto,
     bool needs_protected_surface) {
   DVLOG(1) << __FUNCTION__;
 
@@ -1526,8 +1522,7 @@
     client_->NotifyInitializationComplete(success);
 }
 
-void AndroidVideoDecodeAccelerator::NotifyPictureReady(
-    const media::Picture& picture) {
+void AndroidVideoDecodeAccelerator::NotifyPictureReady(const Picture& picture) {
   if (client_)
     client_->PictureReady(picture);
 }
@@ -1549,7 +1544,7 @@
 }
 
 void AndroidVideoDecodeAccelerator::NotifyError(
-    media::VideoDecodeAccelerator::Error error,
+    VideoDecodeAccelerator::Error error,
     int token) {
   DVLOG(1) << __FUNCTION__ << ": error: " << error << " token: " << token
            << " current: " << error_sequence_token_;
@@ -1589,7 +1584,7 @@
 }
 
 // static
-media::VideoDecodeAccelerator::Capabilities
+VideoDecodeAccelerator::Capabilities
 AndroidVideoDecodeAccelerator::GetCapabilities(
     const gpu::GpuPreferences& gpu_preferences) {
   Capabilities capabilities;
@@ -1600,9 +1595,9 @@
   // us to completely drain the decoder before releasing it, which is difficult
   // and time consuming to do while the surface is being destroyed.
   if (base::android::BuildInfo::GetInstance()->sdk_int() >= 18 &&
-      media::MediaCodecUtil::IsVp8DecoderAvailable()) {
+      MediaCodecUtil::IsVp8DecoderAvailable()) {
     SupportedProfile profile;
-    profile.profile = media::VP8PROFILE_ANY;
+    profile.profile = VP8PROFILE_ANY;
     profile.min_resolution.SetSize(0, 0);
     profile.max_resolution.SetSize(3840, 2160);
     // If we know MediaCodec will just create a software codec, prefer our
@@ -1610,12 +1605,12 @@
     // within the renderer sandbox. However if the content is encrypted, we
     // must use MediaCodec anyways since MediaDrm offers no way to decrypt
     // the buffers and let us use our internal software decoders.
-    profile.encrypted_only = media::VideoCodecBridge::IsKnownUnaccelerated(
-        media::kCodecVP8, media::MEDIA_CODEC_DECODER);
+    profile.encrypted_only =
+        VideoCodecBridge::IsKnownUnaccelerated(kCodecVP8, MEDIA_CODEC_DECODER);
     profiles.push_back(profile);
   }
 
-  if (media::MediaCodecUtil::IsVp9DecoderAvailable()) {
+  if (MediaCodecUtil::IsVp9DecoderAvailable()) {
     SupportedProfile profile;
     profile.min_resolution.SetSize(0, 0);
     profile.max_resolution.SetSize(3840, 2160);
@@ -1624,15 +1619,15 @@
     // within the renderer sandbox. However if the content is encrypted, we
     // must use MediaCodec anyways since MediaDrm offers no way to decrypt
     // the buffers and let us use our internal software decoders.
-    profile.encrypted_only = media::VideoCodecBridge::IsKnownUnaccelerated(
-        media::kCodecVP9, media::MEDIA_CODEC_DECODER);
-    profile.profile = media::VP9PROFILE_PROFILE0;
+    profile.encrypted_only =
+        VideoCodecBridge::IsKnownUnaccelerated(kCodecVP9, MEDIA_CODEC_DECODER);
+    profile.profile = VP9PROFILE_PROFILE0;
     profiles.push_back(profile);
-    profile.profile = media::VP9PROFILE_PROFILE1;
+    profile.profile = VP9PROFILE_PROFILE1;
     profiles.push_back(profile);
-    profile.profile = media::VP9PROFILE_PROFILE2;
+    profile.profile = VP9PROFILE_PROFILE2;
     profiles.push_back(profile);
-    profile.profile = media::VP9PROFILE_PROFILE3;
+    profile.profile = VP9PROFILE_PROFILE3;
     profiles.push_back(profile);
   }
 
@@ -1647,13 +1642,13 @@
     profiles.push_back(profile);
   }
 
-  capabilities.flags = media::VideoDecodeAccelerator::Capabilities::
-      SUPPORTS_DEFERRED_INITIALIZATION;
+  capabilities.flags =
+      VideoDecodeAccelerator::Capabilities::SUPPORTS_DEFERRED_INITIALIZATION;
   if (UseDeferredRenderingStrategy(gpu_preferences)) {
-    capabilities.flags |= media::VideoDecodeAccelerator::Capabilities::
+    capabilities.flags |= VideoDecodeAccelerator::Capabilities::
         NEEDS_ALL_PICTURE_BUFFERS_TO_DECODE;
-    if (media::MediaCodecUtil::IsSurfaceViewOutputSupported()) {
-      capabilities.flags |= media::VideoDecodeAccelerator::Capabilities::
+    if (MediaCodecUtil::IsSurfaceViewOutputSupported()) {
+      capabilities.flags |= VideoDecodeAccelerator::Capabilities::
           SUPPORTS_EXTERNAL_OUTPUT_SURFACE;
     }
   }
diff --git a/media/gpu/android_video_decode_accelerator.h b/media/gpu/android_video_decode_accelerator.h
index 6885bdaeb..e5cc820 100644
--- a/media/gpu/android_video_decode_accelerator.h
+++ b/media/gpu/android_video_decode_accelerator.h
@@ -43,10 +43,10 @@
 // It delegates attaching pictures to PictureBuffers to a BackingStrategy, but
 // otherwise handles the work of transferring data to / from MediaCodec.
 class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
-    : public media::VideoDecodeAccelerator,
+    : public VideoDecodeAccelerator,
       public AVDAStateProvider {
  public:
-  using OutputBufferMap = std::map<int32_t, media::PictureBuffer>;
+  using OutputBufferMap = std::map<int32_t, PictureBuffer>;
 
   // A BackingStrategy is responsible for making a PictureBuffer's texture
   // contain the image that a MediaCodec decoder buffer tells it to.
@@ -79,16 +79,14 @@
     // the decoded output buffer at codec_buffer_index.
     virtual void UseCodecBufferForPictureBuffer(
         int32_t codec_buffer_index,
-        const media::PictureBuffer& picture_buffer) = 0;
+        const PictureBuffer& picture_buffer) = 0;
 
     // Notify strategy that a picture buffer has been assigned.
-    virtual void AssignOnePictureBuffer(
-        const media::PictureBuffer& picture_buffer,
-        bool have_context) {}
+    virtual void AssignOnePictureBuffer(const PictureBuffer& picture_buffer,
+                                        bool have_context) {}
 
     // Notify strategy that a picture buffer has been reused.
-    virtual void ReuseOnePictureBuffer(
-        const media::PictureBuffer& picture_buffer) {}
+    virtual void ReuseOnePictureBuffer(const PictureBuffer& picture_buffer) {}
 
     // Release MediaCodec buffers.
     virtual void ReleaseCodecBuffers(
@@ -100,7 +98,7 @@
     // Notify strategy that we have a new android MediaCodec instance.  This
     // happens when we're starting up or re-configuring mid-stream.  Any
     // previously provided codec should no longer be referenced.
-    virtual void CodecChanged(media::VideoCodecBridge* codec) = 0;
+    virtual void CodecChanged(VideoCodecBridge* codec) = 0;
 
     // Notify the strategy that a frame is available.  This callback can happen
     // on any thread at any time.
@@ -114,7 +112,7 @@
     // |new_size| and also update any size-dependent state (e.g. size of
     // associated texture). Callers should set the correct GL context prior to
     // calling.
-    virtual void UpdatePictureBufferSize(media::PictureBuffer* picture_buffer,
+    virtual void UpdatePictureBufferSize(PictureBuffer* picture_buffer,
                                          const gfx::Size& new_size) = 0;
   };
 
@@ -124,11 +122,10 @@
 
   ~AndroidVideoDecodeAccelerator() override;
 
-  // media::VideoDecodeAccelerator implementation:
+  // VideoDecodeAccelerator implementation:
   bool Initialize(const Config& config, Client* client) override;
-  void Decode(const media::BitstreamBuffer& bitstream_buffer) override;
-  void AssignPictureBuffers(
-      const std::vector<media::PictureBuffer>& buffers) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer) override;
+  void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
   void ReusePictureBuffer(int32_t picture_buffer_id) override;
   void Flush() override;
   void Reset() override;
@@ -143,13 +140,13 @@
   const base::ThreadChecker& ThreadChecker() const override;
   base::WeakPtr<gpu::gles2::GLES2Decoder> GetGlDecoder() const override;
   gpu::gles2::TextureRef* GetTextureForPicture(
-      const media::PictureBuffer& picture_buffer) override;
+      const PictureBuffer& picture_buffer) override;
   scoped_refptr<gl::SurfaceTexture> CreateAttachedSurfaceTexture(
       GLuint* service_id) override;
   void PostError(const ::tracked_objects::Location& from_here,
-                 media::VideoDecodeAccelerator::Error error) override;
+                 VideoDecodeAccelerator::Error error) override;
 
-  static media::VideoDecodeAccelerator::Capabilities GetCapabilities(
+  static VideoDecodeAccelerator::Capabilities GetCapabilities(
       const gpu::GpuPreferences& gpu_preferences);
 
   // Notifies about SurfaceTexture::OnFrameAvailable.  This can happen on any
@@ -189,7 +186,7 @@
     CodecConfig();
 
     // Codec type. Used when we configure media codec.
-    media::VideoCodec codec_ = media::kUnknownVideoCodec;
+    VideoCodec codec_ = kUnknownVideoCodec;
 
     // Whether the encryption scheme requires using a protected surface.
     bool needs_protected_surface_ = false;
@@ -200,7 +197,7 @@
 
     // The MediaCrypto object is used in the MediaCodec.configure() in case of
     // an encrypted stream.
-    media::MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto_;
+    MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto_;
 
     // Initial coded size.  The actual size might change at any time, so this
     // is only a hint.
@@ -245,12 +242,12 @@
 
   // Instantiate a media codec using |codec_config|.
   // This may be called on any thread.
-  static std::unique_ptr<media::VideoCodecBridge>
-  ConfigureMediaCodecOnAnyThread(scoped_refptr<CodecConfig> codec_config);
+  static std::unique_ptr<VideoCodecBridge> ConfigureMediaCodecOnAnyThread(
+      scoped_refptr<CodecConfig> codec_config);
 
   // Called on the main thread to update |media_codec_| and complete codec
   // configuration.  |media_codec| will be null if configuration failed.
-  void OnCodecConfigured(std::unique_ptr<media::VideoCodecBridge> media_codec);
+  void OnCodecConfigured(std::unique_ptr<VideoCodecBridge> media_codec);
 
   // Sends the decoded frame specified by |codec_buffer_index| to the client.
   void SendDecodedFrameToClient(int32_t codec_buffer_index,
@@ -276,15 +273,14 @@
 
   // Decode the content in the |bitstream_buffer|. Note that a
   // |bitstream_buffer| with an id of -1 indicates a flush command.
-  void DecodeBuffer(const media::BitstreamBuffer& bitstream_buffer);
+  void DecodeBuffer(const BitstreamBuffer& bitstream_buffer);
 
   // Called during Initialize() for encrypted streams to set up the CDM.
   void InitializeCdm();
 
   // Called after the CDM obtains a MediaCrypto object.
-  void OnMediaCryptoReady(
-      media::MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto,
-      bool needs_protected_surface);
+  void OnMediaCryptoReady(MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto,
+                          bool needs_protected_surface);
 
   // Called when a new key is added to the CDM.
   void OnKeyAdded();
@@ -293,7 +289,7 @@
   void NotifyInitializationComplete(bool success);
 
   // Notifies the client about the availability of a picture.
-  void NotifyPictureReady(const media::Picture& picture);
+  void NotifyPictureReady(const Picture& picture);
 
   // Notifies the client that the input buffer identified by input_buffer_id has
   // been processed.
@@ -311,7 +307,7 @@
   // from breaking.  NotifyError will do so immediately, PostError may wait.
   // |token| has to match |error_sequence_token_|, or else it's assumed to be
   // from a post that's prior to a previous reset, and ignored.
-  void NotifyError(media::VideoDecodeAccelerator::Error error, int token);
+  void NotifyError(VideoDecodeAccelerator::Error error, int token);
 
   // Start or stop our work-polling timer based on whether we did any work, and
   // how long it has been since we've done work.  Calling this with true will
@@ -373,7 +369,7 @@
   std::queue<int32_t> free_picture_ids_;
 
   // The low-level decoder which Android SDK provides.
-  std::unique_ptr<media::VideoCodecBridge> media_codec_;
+  std::unique_ptr<VideoCodecBridge> media_codec_;
 
   // Set to true after requesting picture buffers to the client.
   bool picturebuffers_requested_;
@@ -385,11 +381,11 @@
   // if any.  The goal is to prevent leaving a BitstreamBuffer's shared memory
   // handle open.
   struct BitstreamRecord {
-    BitstreamRecord(const media::BitstreamBuffer&);
+    BitstreamRecord(const BitstreamBuffer&);
     BitstreamRecord(BitstreamRecord&& other);
     ~BitstreamRecord();
 
-    media::BitstreamBuffer buffer;
+    BitstreamBuffer buffer;
 
     // |memory| is not mapped, and may be null if buffer has no data.
     std::unique_ptr<SharedMemoryRegion> memory;
@@ -424,9 +420,9 @@
   DrainType drain_type_;
 
   // Holds a ref-count to the CDM to avoid using the CDM after it's destroyed.
-  scoped_refptr<media::MediaKeys> cdm_for_reference_holding_only_;
+  scoped_refptr<MediaKeys> cdm_for_reference_holding_only_;
 
-  media::MediaDrmBridgeCdmContext* media_drm_bridge_cdm_context_;
+  MediaDrmBridgeCdmContext* media_drm_bridge_cdm_context_;
 
   // MediaDrmBridge requires registration/unregistration of the player;
   // this registration id is used for that.
diff --git a/media/gpu/android_video_decode_accelerator_unittest.cc b/media/gpu/android_video_decode_accelerator_unittest.cc
index d8a119c..aaa7afc 100644
--- a/media/gpu/android_video_decode_accelerator_unittest.cc
+++ b/media/gpu/android_video_decode_accelerator_unittest.cc
@@ -37,8 +37,7 @@
 
 namespace media {
 
-class MockVideoDecodeAcceleratorClient
-    : public media::VideoDecodeAccelerator::Client {
+class MockVideoDecodeAcceleratorClient : public VideoDecodeAccelerator::Client {
  public:
   MockVideoDecodeAcceleratorClient() {}
   ~MockVideoDecodeAcceleratorClient() override {}
@@ -50,11 +49,11 @@
                              const gfx::Size& dimensions,
                              uint32_t texture_target) override {}
   void DismissPictureBuffer(int32_t picture_buffer_id) override {}
-  void PictureReady(const media::Picture& picture) override {}
+  void PictureReady(const Picture& picture) override {}
   void NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) override {}
   void NotifyFlushDone() override {}
   void NotifyResetDone() override {}
-  void NotifyError(media::VideoDecodeAccelerator::Error error) override {}
+  void NotifyError(VideoDecodeAccelerator::Error error) override {}
 };
 
 class AndroidVideoDecodeAcceleratorTest : public testing::Test {
@@ -64,7 +63,7 @@
  protected:
   void SetUp() override {
     JNIEnv* env = base::android::AttachCurrentThread();
-    media::RegisterJni(env);
+    RegisterJni(env);
 
     // Start message loop because
     // AndroidVideoDecodeAccelerator::ConfigureMediaCodec() starts a timer task.
@@ -79,7 +78,7 @@
         base::Bind(&MockGetGLES2Decoder, decoder->AsWeakPtr())));
   }
 
-  bool Configure(media::VideoCodec codec) {
+  bool Configure(VideoCodec codec) {
     AndroidVideoDecodeAccelerator* accelerator =
         static_cast<AndroidVideoDecodeAccelerator*>(accelerator_.get());
     scoped_refptr<gl::SurfaceTexture> surface_texture =
@@ -91,18 +90,18 @@
   }
 
  private:
-  std::unique_ptr<media::VideoDecodeAccelerator> accelerator_;
+  std::unique_ptr<VideoDecodeAccelerator> accelerator_;
   std::unique_ptr<base::MessageLoop> message_loop_;
 };
 
 TEST_F(AndroidVideoDecodeAcceleratorTest, ConfigureUnsupportedCodec) {
-  EXPECT_FALSE(Configure(media::kUnknownVideoCodec));
+  EXPECT_FALSE(Configure(kUnknownVideoCodec));
 }
 
 TEST_F(AndroidVideoDecodeAcceleratorTest, ConfigureSupportedCodec) {
-  if (!media::MediaCodecUtil::IsMediaCodecAvailable())
+  if (!MediaCodecUtil::IsMediaCodecAvailable())
     return;
-  EXPECT_TRUE(Configure(media::kCodecVP8));
+  EXPECT_TRUE(Configure(kCodecVP8));
 }
 
 }  // namespace media
diff --git a/media/gpu/android_video_encode_accelerator.cc b/media/gpu/android_video_encode_accelerator.cc
index efe0016..62c320f 100644
--- a/media/gpu/android_video_encode_accelerator.cc
+++ b/media/gpu/android_video_encode_accelerator.cc
@@ -25,9 +25,6 @@
 #include "ui/gl/android/scoped_java_surface.h"
 #include "ui/gl/gl_bindings.h"
 
-using media::VideoCodecBridge;
-using media::VideoFrame;
-
 namespace media {
 
 // Limit default max video codec size for Android to avoid
@@ -85,7 +82,7 @@
   if (mime.empty())
     return false;
 
-  std::set<int> formats = media::MediaCodecUtil::GetEncoderColorFormats(mime);
+  std::set<int> formats = MediaCodecUtil::GetEncoderColorFormats(mime);
   if (formats.count(COLOR_FORMAT_YUV420_SEMIPLANAR) > 0)
     *pixel_format = COLOR_FORMAT_YUV420_SEMIPLANAR;
   else if (formats.count(COLOR_FORMAT_YUV420_PLANAR) > 0)
@@ -103,25 +100,25 @@
   DCHECK(thread_checker_.CalledOnValidThread());
 }
 
-media::VideoEncodeAccelerator::SupportedProfiles
+VideoEncodeAccelerator::SupportedProfiles
 AndroidVideoEncodeAccelerator::GetSupportedProfiles() {
   SupportedProfiles profiles;
 
   const struct {
-    const media::VideoCodec codec;
-    const media::VideoCodecProfile profile;
-  } kSupportedCodecs[] = {{media::kCodecVP8, media::VP8PROFILE_ANY},
-                          {media::kCodecH264, media::H264PROFILE_BASELINE},
-                          {media::kCodecH264, media::H264PROFILE_MAIN}};
+    const VideoCodec codec;
+    const VideoCodecProfile profile;
+  } kSupportedCodecs[] = {{kCodecVP8, VP8PROFILE_ANY},
+                          {kCodecH264, H264PROFILE_BASELINE},
+                          {kCodecH264, H264PROFILE_MAIN}};
 
   for (const auto& supported_codec : kSupportedCodecs) {
-    if (supported_codec.codec == media::kCodecVP8 &&
-        !media::MediaCodecUtil::IsVp8EncoderAvailable()) {
+    if (supported_codec.codec == kCodecVP8 &&
+        !MediaCodecUtil::IsVp8EncoderAvailable()) {
       continue;
     }
 
     if (VideoCodecBridge::IsKnownUnaccelerated(supported_codec.codec,
-                                               media::MEDIA_CODEC_ENCODER)) {
+                                               MEDIA_CODEC_ENCODER)) {
       continue;
     }
 
@@ -138,9 +135,9 @@
 }
 
 bool AndroidVideoEncodeAccelerator::Initialize(
-    media::VideoPixelFormat format,
+    VideoPixelFormat format,
     const gfx::Size& input_visible_size,
-    media::VideoCodecProfile output_profile,
+    VideoCodecProfile output_profile,
     uint32_t initial_bitrate,
     Client* client) {
   DVLOG(3) << __PRETTY_FUNCTION__ << " format: " << format
@@ -152,25 +149,25 @@
 
   client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
 
-  if (!(media::MediaCodecUtil::SupportsSetParameters() &&
-        format == media::PIXEL_FORMAT_I420)) {
+  if (!(MediaCodecUtil::SupportsSetParameters() &&
+        format == PIXEL_FORMAT_I420)) {
     DLOG(ERROR) << "Unexpected combo: " << format << ", " << output_profile;
     return false;
   }
 
   std::string mime_type;
-  media::VideoCodec codec;
+  VideoCodec codec;
   // The client should be prepared to feed at least this many frames into the
   // encoder before being returned any output frames, since the encoder may
   // need to hold onto some subset of inputs as reference pictures.
   uint32_t frame_input_count;
-  if (output_profile == media::VP8PROFILE_ANY) {
-    codec = media::kCodecVP8;
+  if (output_profile == VP8PROFILE_ANY) {
+    codec = kCodecVP8;
     mime_type = "video/x-vnd.on2.vp8";
     frame_input_count = 1;
-  } else if (output_profile == media::H264PROFILE_BASELINE ||
-             output_profile == media::H264PROFILE_MAIN) {
-    codec = media::kCodecH264;
+  } else if (output_profile == H264PROFILE_BASELINE ||
+             output_profile == H264PROFILE_MAIN) {
+    codec = kCodecH264;
     mime_type = "video/avc";
     frame_input_count = 30;
   } else {
@@ -181,8 +178,7 @@
   last_set_bitrate_ = initial_bitrate;
 
   // Only consider using MediaCodec if it's likely backed by hardware.
-  if (media::VideoCodecBridge::IsKnownUnaccelerated(
-          codec, media::MEDIA_CODEC_ENCODER)) {
+  if (VideoCodecBridge::IsKnownUnaccelerated(codec, MEDIA_CODEC_ENCODER)) {
     DLOG(ERROR) << "No HW support";
     return false;
   }
@@ -192,7 +188,7 @@
     DLOG(ERROR) << "No color format support.";
     return false;
   }
-  media_codec_.reset(media::VideoCodecBridge::CreateEncoder(
+  media_codec_.reset(VideoCodecBridge::CreateEncoder(
       codec, input_visible_size, initial_bitrate, INITIAL_FRAMERATE,
       IFRAME_INTERVAL, pixel_format));
 
@@ -233,8 +229,8 @@
     bool force_keyframe) {
   DVLOG(3) << __PRETTY_FUNCTION__ << ": " << force_keyframe;
   DCHECK(thread_checker_.CalledOnValidThread());
-  RETURN_ON_FAILURE(frame->format() == media::PIXEL_FORMAT_I420,
-                    "Unexpected format", kInvalidArgumentError);
+  RETURN_ON_FAILURE(frame->format() == PIXEL_FORMAT_I420, "Unexpected format",
+                    kInvalidArgumentError);
   RETURN_ON_FAILURE(frame->visible_rect().size() == frame_size_,
                     "Unexpected resolution", kInvalidArgumentError);
   // MediaCodec doesn't have a way to specify stride for non-Packed formats, so
@@ -255,7 +251,7 @@
 }
 
 void AndroidVideoEncodeAccelerator::UseOutputBitstreamBuffer(
-    const media::BitstreamBuffer& buffer) {
+    const BitstreamBuffer& buffer) {
   DVLOG(3) << __PRETTY_FUNCTION__ << ": bitstream_buffer_id=" << buffer.id();
   DCHECK(thread_checker_.CalledOnValidThread());
   available_bitstream_buffers_.push_back(buffer);
@@ -302,12 +298,12 @@
     return;
 
   int input_buf_index = 0;
-  media::MediaCodecStatus status =
+  MediaCodecStatus status =
       media_codec_->DequeueInputBuffer(NoWaitTimeOut(), &input_buf_index);
-  if (status != media::MEDIA_CODEC_OK) {
-    DCHECK(status == media::MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER ||
-           status == media::MEDIA_CODEC_ERROR);
-    RETURN_ON_FAILURE(status != media::MEDIA_CODEC_ERROR, "MediaCodec error",
+  if (status != MEDIA_CODEC_OK) {
+    DCHECK(status == MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER ||
+           status == MEDIA_CODEC_ERROR);
+    RETURN_ON_FAILURE(status != MEDIA_CODEC_ERROR, "MediaCodec error",
                       kPlatformFailureError);
     return;
   }
@@ -326,11 +322,11 @@
   uint8_t* buffer = NULL;
   size_t capacity = 0;
   status = media_codec_->GetInputBuffer(input_buf_index, &buffer, &capacity);
-  RETURN_ON_FAILURE(status == media::MEDIA_CODEC_OK, "GetInputBuffer failed.",
+  RETURN_ON_FAILURE(status == MEDIA_CODEC_OK, "GetInputBuffer failed.",
                     kPlatformFailureError);
 
   size_t queued_size =
-      VideoFrame::AllocationSize(media::PIXEL_FORMAT_I420, frame->coded_size());
+      VideoFrame::AllocationSize(PIXEL_FORMAT_I420, frame->coded_size());
   RETURN_ON_FAILURE(capacity >= queued_size,
                     "Failed to get input buffer: " << input_buf_index,
                     kPlatformFailureError);
@@ -356,7 +352,7 @@
                                           fake_input_timestamp_);
   UMA_HISTOGRAM_TIMES("Media.AVDA.InputQueueTime",
                       base::Time::Now() - std::get<2>(input));
-  RETURN_ON_FAILURE(status == media::MEDIA_CODEC_OK,
+  RETURN_ON_FAILURE(status == MEDIA_CODEC_OK,
                     "Failed to QueueInputBuffer: " << status,
                     kPlatformFailureError);
   ++num_buffers_at_codec_;
@@ -374,26 +370,26 @@
   size_t size = 0;
   bool key_frame = false;
   do {
-    media::MediaCodecStatus status = media_codec_->DequeueOutputBuffer(
+    MediaCodecStatus status = media_codec_->DequeueOutputBuffer(
         NoWaitTimeOut(), &buf_index, &offset, &size, NULL, NULL, &key_frame);
     switch (status) {
-      case media::MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
+      case MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
         return;
 
-      case media::MEDIA_CODEC_ERROR:
+      case MEDIA_CODEC_ERROR:
         RETURN_ON_FAILURE(false, "Codec error", kPlatformFailureError);
         // Unreachable because of previous statement, but included for clarity.
         return;
 
-      case media::MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
+      case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
         RETURN_ON_FAILURE(false, "Unexpected output format change",
                           kPlatformFailureError);
         break;
 
-      case media::MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
+      case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
         break;
 
-      case media::MEDIA_CODEC_OK:
+      case MEDIA_CODEC_OK:
         DCHECK_GE(buf_index, 0);
         break;
 
@@ -403,7 +399,7 @@
     }
   } while (buf_index < 0);
 
-  media::BitstreamBuffer bitstream_buffer = available_bitstream_buffers_.back();
+  BitstreamBuffer bitstream_buffer = available_bitstream_buffers_.back();
   available_bitstream_buffers_.pop_back();
   std::unique_ptr<SharedMemoryRegion> shm(
       new SharedMemoryRegion(bitstream_buffer, false));
@@ -412,10 +408,10 @@
                     "Encoded buffer too large: " << size << ">" << shm->size(),
                     kPlatformFailureError);
 
-  media::MediaCodecStatus status = media_codec_->CopyFromOutputBuffer(
+  MediaCodecStatus status = media_codec_->CopyFromOutputBuffer(
       buf_index, offset, shm->memory(), size);
-  RETURN_ON_FAILURE(status == media::MEDIA_CODEC_OK,
-                    "CopyFromOutputBuffer failed", kPlatformFailureError);
+  RETURN_ON_FAILURE(status == MEDIA_CODEC_OK, "CopyFromOutputBuffer failed",
+                    kPlatformFailureError);
   media_codec_->ReleaseOutputBuffer(buf_index, false);
   --num_buffers_at_codec_;
 
diff --git a/media/gpu/android_video_encode_accelerator.h b/media/gpu/android_video_encode_accelerator.h
index 28b787bd..ce0fb704 100644
--- a/media/gpu/android_video_encode_accelerator.h
+++ b/media/gpu/android_video_encode_accelerator.h
@@ -23,33 +23,30 @@
 #include "media/video/video_encode_accelerator.h"
 
 namespace media {
+
 class BitstreamBuffer;
-}  // namespace media
 
-namespace media {
-
-// Android-specific implementation of media::VideoEncodeAccelerator, enabling
+// Android-specific implementation of VideoEncodeAccelerator, enabling
 // hardware-acceleration of video encoding, based on Android's MediaCodec class
 // (http://developer.android.com/reference/android/media/MediaCodec.html).  This
 // class expects to live and be called on a single thread (the GPU process'
 // ChildThread).
 class MEDIA_GPU_EXPORT AndroidVideoEncodeAccelerator
-    : public media::VideoEncodeAccelerator {
+    : public VideoEncodeAccelerator {
  public:
   AndroidVideoEncodeAccelerator();
   ~AndroidVideoEncodeAccelerator() override;
 
-  // media::VideoEncodeAccelerator implementation.
-  media::VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles()
-      override;
-  bool Initialize(media::VideoPixelFormat format,
+  // VideoEncodeAccelerator implementation.
+  VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
+  bool Initialize(VideoPixelFormat format,
                   const gfx::Size& input_visible_size,
-                  media::VideoCodecProfile output_profile,
+                  VideoCodecProfile output_profile,
                   uint32_t initial_bitrate,
                   Client* client) override;
-  void Encode(const scoped_refptr<media::VideoFrame>& frame,
+  void Encode(const scoped_refptr<VideoFrame>& frame,
               bool force_keyframe) override;
-  void UseOutputBitstreamBuffer(const media::BitstreamBuffer& buffer) override;
+  void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
   void RequestEncodingParametersChange(uint32_t bitrate,
                                        uint32_t framerate) override;
   void Destroy() override;
@@ -79,15 +76,14 @@
   // error triggers.
   std::unique_ptr<base::WeakPtrFactory<Client>> client_ptr_factory_;
 
-  std::unique_ptr<media::VideoCodecBridge> media_codec_;
+  std::unique_ptr<VideoCodecBridge> media_codec_;
 
   // Bitstream buffers waiting to be populated & returned to the client.
-  std::vector<media::BitstreamBuffer> available_bitstream_buffers_;
+  std::vector<BitstreamBuffer> available_bitstream_buffers_;
 
   // Frames waiting to be passed to the codec, queued until an input buffer is
   // available.  Each element is a tuple of <Frame, key_frame, enqueue_time>.
-  typedef std::queue<
-      std::tuple<scoped_refptr<media::VideoFrame>, bool, base::Time>>
+  typedef std::queue<std::tuple<scoped_refptr<VideoFrame>, bool, base::Time>>
       PendingFrames;
   PendingFrames pending_frames_;
 
diff --git a/media/gpu/avda_codec_image.cc b/media/gpu/avda_codec_image.cc
index 4417252..05428fd 100644
--- a/media/gpu/avda_codec_image.cc
+++ b/media/gpu/avda_codec_image.cc
@@ -21,7 +21,7 @@
 AVDACodecImage::AVDACodecImage(
     int picture_buffer_id,
     const scoped_refptr<AVDASharedState>& shared_state,
-    media::VideoCodecBridge* codec,
+    VideoCodecBridge* codec,
     const base::WeakPtr<gpu::gles2::GLES2Decoder>& decoder,
     const scoped_refptr<gl::SurfaceTexture>& surface_texture)
     : shared_state_(shared_state),
@@ -147,7 +147,7 @@
   UpdateSurfaceInternal(update_mode, kDoRestoreBindings);
 }
 
-void AVDACodecImage::CodecChanged(media::MediaCodecBridge* codec) {
+void AVDACodecImage::CodecChanged(MediaCodecBridge* codec) {
   media_codec_ = codec;
   codec_buffer_index_ = kInvalidCodecBufferIndex;
 }
diff --git a/media/gpu/avda_codec_image.h b/media/gpu/avda_codec_image.h
index 95b2ebf..a60e7d70 100644
--- a/media/gpu/avda_codec_image.h
+++ b/media/gpu/avda_codec_image.h
@@ -27,7 +27,7 @@
  public:
   AVDACodecImage(int picture_buffer_id,
                  const scoped_refptr<AVDASharedState>& shared_state,
-                 media::VideoCodecBridge* codec,
+                 VideoCodecBridge* codec,
                  const base::WeakPtr<gpu::gles2::GLES2Decoder>& decoder,
                  const scoped_refptr<gl::SurfaceTexture>& surface_texture);
 
@@ -72,7 +72,7 @@
   void UpdateSurface(UpdateMode update_mode);
 
   // Updates the MediaCodec for this image; clears |codec_buffer_index_|.
-  void CodecChanged(media::MediaCodecBridge* codec);
+  void CodecChanged(MediaCodecBridge* codec);
 
   void set_texture(gpu::gles2::Texture* texture) { texture_ = texture; }
 
@@ -139,7 +139,7 @@
   gfx::Size size_;
 
   // May be null.
-  media::MediaCodecBridge* media_codec_;
+  MediaCodecBridge* media_codec_;
 
   const base::WeakPtr<gpu::gles2::GLES2Decoder> decoder_;
 
diff --git a/media/gpu/avda_return_on_failure.h b/media/gpu/avda_return_on_failure.h
index b68d9a1..7826ca19 100644
--- a/media/gpu/avda_return_on_failure.h
+++ b/media/gpu/avda_return_on_failure.h
@@ -13,13 +13,13 @@
 // This is meant to be used only within AndroidVideoDecoder and the various
 // backing strategies.  |provider| must support PostError.  The varargs
 // can be used for the return value.
-#define RETURN_ON_FAILURE(provider, result, log, error, ...)                \
-  do {                                                                      \
-    if (!(result)) {                                                        \
-      DLOG(ERROR) << log;                                                   \
-      provider->PostError(FROM_HERE, media::VideoDecodeAccelerator::error); \
-      return __VA_ARGS__;                                                   \
-    }                                                                       \
+#define RETURN_ON_FAILURE(provider, result, log, error, ...)         \
+  do {                                                               \
+    if (!(result)) {                                                 \
+      DLOG(ERROR) << log;                                            \
+      provider->PostError(FROM_HERE, VideoDecodeAccelerator::error); \
+      return __VA_ARGS__;                                            \
+    }                                                                \
   } while (0)
 
 // Similar to the above, with some handy boilerplate savings.  The varargs
diff --git a/media/gpu/avda_shared_state.cc b/media/gpu/avda_shared_state.cc
index f75a837..12b2525 100644
--- a/media/gpu/avda_shared_state.cc
+++ b/media/gpu/avda_shared_state.cc
@@ -74,7 +74,7 @@
   DCHECK(surface_);
 }
 
-void AVDASharedState::CodecChanged(media::MediaCodecBridge* codec) {
+void AVDASharedState::CodecChanged(MediaCodecBridge* codec) {
   for (auto& image_kv : codec_images_)
     image_kv.second->CodecChanged(codec);
   release_time_ = base::TimeTicks();
@@ -99,7 +99,7 @@
 }
 
 void AVDASharedState::RenderCodecBufferToSurfaceTexture(
-    media::MediaCodecBridge* codec,
+    MediaCodecBridge* codec,
     int codec_buffer_index) {
   if (!release_time_.is_null())
     WaitForFrameAvailable();
diff --git a/media/gpu/avda_shared_state.h b/media/gpu/avda_shared_state.h
index c25cf70..d98d0ad 100644
--- a/media/gpu/avda_shared_state.h
+++ b/media/gpu/avda_shared_state.h
@@ -47,7 +47,7 @@
 
   // Iterates over all known codec images and updates the MediaCodec attached to
   // each one.
-  void CodecChanged(media::MediaCodecBridge* codec);
+  void CodecChanged(MediaCodecBridge* codec);
 
   // Methods for finding and updating the AVDACodecImage associated with a given
   // picture buffer id. GetImageForPicture() will return null for unknown ids.
@@ -72,7 +72,7 @@
   //
   // Some devices do not reliably notify frame availability, so we use a very
   // short deadline of only a few milliseconds to avoid indefinite stalls.
-  void RenderCodecBufferToSurfaceTexture(media::MediaCodecBridge* codec,
+  void RenderCodecBufferToSurfaceTexture(MediaCodecBridge* codec,
                                          int codec_buffer_index);
 
  protected:
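
The short-deadline wait described in the comment above can be sketched with a
plain condition variable. This is only an illustration of the idea, not the
actual AVDASharedState implementation; the 5 ms deadline is an assumed value.

#include <chrono>
#include <condition_variable>
#include <mutex>

// Illustrative only: a frame-available wait that gives up after a few
// milliseconds, so a device that never delivers OnFrameAvailable() cannot
// stall decoding indefinitely.
class FrameAvailableWaiter {
 public:
  void OnFrameAvailable() {
    std::lock_guard<std::mutex> lock(mutex_);
    frame_available_ = true;
    cv_.notify_one();
  }

  // Returns true if the frame arrived before the deadline.
  bool WaitForFrameAvailable() {
    std::unique_lock<std::mutex> lock(mutex_);
    bool arrived = cv_.wait_for(lock, std::chrono::milliseconds(5),
                                [this] { return frame_available_; });
    frame_available_ = false;  // Consume the signal either way.
    return arrived;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  bool frame_available_ = false;
};
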
diff --git a/media/gpu/avda_state_provider.h b/media/gpu/avda_state_provider.h
index 6946ded..2657c32 100644
--- a/media/gpu/avda_state_provider.h
+++ b/media/gpu/avda_state_provider.h
@@ -31,7 +31,7 @@
   virtual const base::ThreadChecker& ThreadChecker() const = 0;
   virtual base::WeakPtr<gpu::gles2::GLES2Decoder> GetGlDecoder() const = 0;
   virtual gpu::gles2::TextureRef* GetTextureForPicture(
-      const media::PictureBuffer& picture_buffer) = 0;
+      const PictureBuffer& picture_buffer) = 0;
 
   // Create a SurfaceTexture and attach a new gl texture to it. |*service_id|
   // is set to the created texture id.
@@ -42,7 +42,7 @@
   // This will post NotifyError(), and transition to the error state.
   // It is meant to be called from the RETURN_ON_FAILURE macro.
   virtual void PostError(const ::tracked_objects::Location& from_here,
-                         media::VideoDecodeAccelerator::Error error) = 0;
+                         VideoDecodeAccelerator::Error error) = 0;
 
  protected:
   ~AVDAStateProvider() = default;
diff --git a/media/gpu/dxva_picture_buffer_win.cc b/media/gpu/dxva_picture_buffer_win.cc
index 59c9b304..c151325 100644
--- a/media/gpu/dxva_picture_buffer_win.cc
+++ b/media/gpu/dxva_picture_buffer_win.cc
@@ -36,7 +36,7 @@
 // static
 linked_ptr<DXVAPictureBuffer> DXVAPictureBuffer::Create(
     const DXVAVideoDecodeAccelerator& decoder,
-    const media::PictureBuffer& buffer,
+    const PictureBuffer& buffer,
     EGLConfig egl_config) {
   if (decoder.share_nv12_textures_) {
     linked_ptr<EGLStreamPictureBuffer> picture_buffer(
@@ -84,7 +84,7 @@
   return false;
 }
 
-DXVAPictureBuffer::DXVAPictureBuffer(const media::PictureBuffer& buffer)
+DXVAPictureBuffer::DXVAPictureBuffer(const PictureBuffer& buffer)
     : available_(true), picture_buffer_(buffer) {}
 
 bool DXVAPictureBuffer::BindSampleToTexture(
@@ -284,7 +284,7 @@
   return true;
 }
 
-PbufferPictureBuffer::PbufferPictureBuffer(const media::PictureBuffer& buffer)
+PbufferPictureBuffer::PbufferPictureBuffer(const PictureBuffer& buffer)
     : DXVAPictureBuffer(buffer),
       waiting_to_reuse_(false),
       decoding_surface_(NULL),
@@ -320,8 +320,7 @@
   return true;
 }
 
-EGLStreamPictureBuffer::EGLStreamPictureBuffer(
-    const media::PictureBuffer& buffer)
+EGLStreamPictureBuffer::EGLStreamPictureBuffer(const PictureBuffer& buffer)
     : DXVAPictureBuffer(buffer), stream_(nullptr) {}
 
 EGLStreamPictureBuffer::~EGLStreamPictureBuffer() {
diff --git a/media/gpu/dxva_picture_buffer_win.h b/media/gpu/dxva_picture_buffer_win.h
index 59a35d5..8381475 100644
--- a/media/gpu/dxva_picture_buffer_win.h
+++ b/media/gpu/dxva_picture_buffer_win.h
@@ -29,7 +29,7 @@
  public:
   static linked_ptr<DXVAPictureBuffer> Create(
       const DXVAVideoDecodeAccelerator& decoder,
-      const media::PictureBuffer& buffer,
+      const PictureBuffer& buffer,
       EGLConfig egl_config);
   virtual ~DXVAPictureBuffer();
 
@@ -62,10 +62,10 @@
   virtual bool BindSampleToTexture(base::win::ScopedComPtr<IMFSample> sample);
 
  protected:
-  explicit DXVAPictureBuffer(const media::PictureBuffer& buffer);
+  explicit DXVAPictureBuffer(const PictureBuffer& buffer);
 
   bool available_;
-  media::PictureBuffer picture_buffer_;
+  PictureBuffer picture_buffer_;
 
   DISALLOW_COPY_AND_ASSIGN(DXVAPictureBuffer);
 };
@@ -73,7 +73,7 @@
 // Copies the video result into an RGBA EGL pbuffer.
 class PbufferPictureBuffer : public DXVAPictureBuffer {
  public:
-  explicit PbufferPictureBuffer(const media::PictureBuffer& buffer);
+  explicit PbufferPictureBuffer(const PictureBuffer& buffer);
   ~PbufferPictureBuffer() override;
 
   bool Initialize(const DXVAVideoDecodeAccelerator& decoder,
@@ -130,7 +130,7 @@
 // Shares the decoded texture with ANGLE without copying by using an EGL stream.
 class EGLStreamPictureBuffer : public DXVAPictureBuffer {
  public:
-  explicit EGLStreamPictureBuffer(const media::PictureBuffer& buffer);
+  explicit EGLStreamPictureBuffer(const PictureBuffer& buffer);
   ~EGLStreamPictureBuffer() override;
 
   bool Initialize();
diff --git a/media/gpu/dxva_video_decode_accelerator_win.cc b/media/gpu/dxva_video_decode_accelerator_win.cc
index cf841fa..7cd768d 100644
--- a/media/gpu/dxva_video_decode_accelerator_win.cc
+++ b/media/gpu/dxva_video_decode_accelerator_win.cc
@@ -218,11 +218,10 @@
 
 namespace media {
 
-static const media::VideoCodecProfile kSupportedProfiles[] = {
-    media::H264PROFILE_BASELINE, media::H264PROFILE_MAIN,
-    media::H264PROFILE_HIGH,     media::VP8PROFILE_ANY,
-    media::VP9PROFILE_PROFILE0,  media::VP9PROFILE_PROFILE1,
-    media::VP9PROFILE_PROFILE2,  media::VP9PROFILE_PROFILE3};
+static const VideoCodecProfile kSupportedProfiles[] = {
+    H264PROFILE_BASELINE, H264PROFILE_MAIN,    H264PROFILE_HIGH,
+    VP8PROFILE_ANY,       VP9PROFILE_PROFILE0, VP9PROFILE_PROFILE1,
+    VP9PROFILE_PROFILE2,  VP9PROFILE_PROFILE3};
 
 CreateDXGIDeviceManager
     DXVAVideoDecodeAccelerator::create_dxgi_device_manager_ = NULL;
@@ -460,40 +459,40 @@
                                             unsigned int size) {
   std::vector<uint8_t> sps;
   std::vector<uint8_t> pps;
-  media::H264NALU nalu;
+  H264NALU nalu;
   bool idr_seen = false;
 
   if (!parser_.get())
-    parser_.reset(new media::H264Parser);
+    parser_.reset(new H264Parser);
 
   parser_->SetStream(stream, size);
   config_changed_ = false;
 
   while (true) {
-    media::H264Parser::Result result = parser_->AdvanceToNextNALU(&nalu);
+    H264Parser::Result result = parser_->AdvanceToNextNALU(&nalu);
 
-    if (result == media::H264Parser::kEOStream)
+    if (result == H264Parser::kEOStream)
       break;
 
-    if (result == media::H264Parser::kUnsupportedStream) {
+    if (result == H264Parser::kUnsupportedStream) {
       DLOG(ERROR) << "Unsupported H.264 stream";
       return false;
     }
 
-    if (result != media::H264Parser::kOk) {
+    if (result != H264Parser::kOk) {
       DLOG(ERROR) << "Failed to parse H.264 stream";
       return false;
     }
 
     switch (nalu.nal_unit_type) {
-      case media::H264NALU::kSPS:
+      case H264NALU::kSPS:
         result = parser_->ParseSPS(&last_sps_id_);
-        if (result == media::H264Parser::kUnsupportedStream) {
+        if (result == H264Parser::kUnsupportedStream) {
           DLOG(ERROR) << "Unsupported SPS";
           return false;
         }
 
-        if (result != media::H264Parser::kOk) {
+        if (result != H264Parser::kOk) {
           DLOG(ERROR) << "Could not parse SPS";
           return false;
         }
@@ -501,20 +500,20 @@
         sps.assign(nalu.data, nalu.data + nalu.size);
         break;
 
-      case media::H264NALU::kPPS:
+      case H264NALU::kPPS:
         result = parser_->ParsePPS(&last_pps_id_);
-        if (result == media::H264Parser::kUnsupportedStream) {
+        if (result == H264Parser::kUnsupportedStream) {
           DLOG(ERROR) << "Unsupported PPS";
           return false;
         }
-        if (result != media::H264Parser::kOk) {
+        if (result != H264Parser::kOk) {
           DLOG(ERROR) << "Could not parse PPS";
           return false;
         }
         pps.assign(nalu.data, nalu.data + nalu.size);
         break;
 
-      case media::H264NALU::kIDRSlice:
+      case H264NALU::kIDRSlice:
         idr_seen = true;
         // If we previously detected a configuration change, and see an IDR
         // slice next time around, we need to flag a configuration change.
@@ -582,7 +581,7 @@
       sent_drain_message_(false),
       get_gl_context_cb_(get_gl_context_cb),
       make_context_current_cb_(make_context_current_cb),
-      codec_(media::kUnknownVideoCodec),
+      codec_(kUnknownVideoCodec),
       decoder_thread_("DXVAVideoDecoderThread"),
       pending_flush_(false),
       share_nv12_textures_(gpu_preferences.enable_zero_copy_dxgi_video),
@@ -680,7 +679,7 @@
                                "Initialize: invalid state: " << state,
                                ILLEGAL_STATE, false);
 
-  media::InitializeMediaFoundation();
+  InitializeMediaFoundation();
 
   RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(config.profile),
                                "Failed to initialize decoder", PLATFORM_FAILURE,
@@ -855,7 +854,7 @@
 }
 
 void DXVAVideoDecodeAccelerator::Decode(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   TRACE_EVENT0("media", "DXVAVideoDecodeAccelerator::Decode");
   DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
 
@@ -894,7 +893,7 @@
 }
 
 void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
-    const std::vector<media::PictureBuffer>& buffers) {
+    const std::vector<PictureBuffer>& buffers) {
   DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
 
   State state = GetState();
@@ -1108,7 +1107,7 @@
 }
 
 // static
-media::VideoDecodeAccelerator::SupportedProfiles
+VideoDecodeAccelerator::SupportedProfiles
 DXVAVideoDecodeAccelerator::GetSupportedProfiles() {
   TRACE_EVENT0("gpu,startup",
                "DXVAVideoDecodeAccelerator::GetSupportedProfiles");
@@ -1146,11 +1145,10 @@
 
 // static
 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMinResolution(
-    media::VideoCodecProfile profile) {
+    VideoCodecProfile profile) {
   TRACE_EVENT0("gpu,startup", "DXVAVideoDecodeAccelerator::GetMinResolution");
   std::pair<int, int> min_resolution;
-  if (profile >= media::H264PROFILE_BASELINE &&
-      profile <= media::H264PROFILE_HIGH) {
+  if (profile >= H264PROFILE_BASELINE && profile <= H264PROFILE_HIGH) {
     // Windows Media Foundation H.264 decoding does not support decoding videos
     // with any dimension smaller than 48 pixels:
     // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
@@ -1165,11 +1163,10 @@
 
 // static
 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxResolution(
-    const media::VideoCodecProfile profile) {
+    const VideoCodecProfile profile) {
   TRACE_EVENT0("gpu,startup", "DXVAVideoDecodeAccelerator::GetMaxResolution");
   std::pair<int, int> max_resolution;
-  if (profile >= media::H264PROFILE_BASELINE &&
-      profile <= media::H264PROFILE_HIGH) {
+  if (profile >= H264PROFILE_BASELINE && profile <= H264PROFILE_HIGH) {
     max_resolution = GetMaxH264Resolution();
   } else {
     // TODO(ananta)
@@ -1344,13 +1341,13 @@
   return legacy_gpu;
 }
 
-bool DXVAVideoDecodeAccelerator::InitDecoder(media::VideoCodecProfile profile) {
+bool DXVAVideoDecodeAccelerator::InitDecoder(VideoCodecProfile profile) {
   HMODULE decoder_dll = NULL;
 
   CLSID clsid = {};
 
   // Profile must fall within the valid range for one of the supported codecs.
-  if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) {
+  if (profile >= H264PROFILE_MIN && profile <= H264PROFILE_MAX) {
     // We mimic the steps CoCreateInstance uses to instantiate the object. This
     // was previously done because it failed inside the sandbox, and now is done
     // as a more minimal approach to avoid other side-effects CCI might have (as
@@ -1370,14 +1367,13 @@
     base::string16 file_version = version_info->file_version();
     RETURN_ON_FAILURE(file_version.find(L"6.1.7140") == base::string16::npos,
                       "blacklisted version of msmpeg2vdec.dll 6.1.7140", false);
-    codec_ = media::kCodecH264;
+    codec_ = kCodecH264;
     clsid = __uuidof(CMSH264DecoderMFT);
   } else if (enable_accelerated_vpx_decode_ &&
-             (profile == media::VP8PROFILE_ANY ||
-              profile == media::VP9PROFILE_PROFILE0 ||
-              profile == media::VP9PROFILE_PROFILE1 ||
-              profile == media::VP9PROFILE_PROFILE2 ||
-              profile == media::VP9PROFILE_PROFILE3)) {
+             (profile == VP8PROFILE_ANY || profile == VP9PROFILE_PROFILE0 ||
+              profile == VP9PROFILE_PROFILE1 ||
+              profile == VP9PROFILE_PROFILE2 ||
+              profile == VP9PROFILE_PROFILE3)) {
     int program_files_key = base::DIR_PROGRAM_FILES;
     if (base::win::OSInfo::GetInstance()->wow64_status() ==
         base::win::OSInfo::WOW64_ENABLED) {
@@ -1389,12 +1385,12 @@
                       "failed to get path for Program Files", false);
 
     dll_path = dll_path.Append(kVPXDecoderDLLPath);
-    if (profile == media::VP8PROFILE_ANY) {
-      codec_ = media::kCodecVP8;
+    if (profile == VP8PROFILE_ANY) {
+      codec_ = kCodecVP8;
       dll_path = dll_path.Append(kVP8DecoderDLLName);
       clsid = CLSID_WebmMfVp8Dec;
     } else {
-      codec_ = media::kCodecVP9;
+      codec_ = kCodecVP9;
       dll_path = dll_path.Append(kVP9DecoderDLLName);
       clsid = CLSID_WebmMfVp9Dec;
     }
@@ -1463,7 +1459,7 @@
   hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
   RETURN_ON_HR_FAILURE(hr, "Failed to check if decoder supports DXVA", false);
 
-  if (codec_ == media::kCodecH264) {
+  if (codec_ == kCodecH264) {
     hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
     RETURN_ON_HR_FAILURE(hr, "Failed to enable DXVA H/W decoding", false);
   }
@@ -1518,11 +1514,11 @@
   hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
   RETURN_ON_HR_FAILURE(hr, "Failed to set major input type", false);
 
-  if (codec_ == media::kCodecH264) {
+  if (codec_ == kCodecH264) {
     hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
-  } else if (codec_ == media::kCodecVP8) {
+  } else if (codec_ == kCodecVP8) {
     hr = media_type->SetGUID(MF_MT_SUBTYPE, MEDIASUBTYPE_VP80);
-  } else if (codec_ == media::kCodecVP9) {
+  } else if (codec_ == kCodecVP9) {
     hr = media_type->SetGUID(MF_MT_SUBTYPE, MEDIASUBTYPE_VP90);
   } else {
     NOTREACHED();
@@ -1575,7 +1571,7 @@
 
   DVLOG(1) << "Input stream info: ";
   DVLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency;
-  if (codec_ == media::kCodecH264) {
+  if (codec_ == kCodecH264) {
     // There should be three flags, one for requiring a whole frame be in a
     // single sample, one for requiring there be one buffer only in a single
     // sample, and one that specifies a fixed sample size. (as in cbSize)
@@ -1592,7 +1588,7 @@
   // allocate its own sample.
   DVLOG(1) << "Flags: " << std::hex << std::showbase
            << output_stream_info_.dwFlags;
-  if (codec_ == media::kCodecH264) {
+  if (codec_ == kCodecH264) {
     CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
   }
   DVLOG(1) << "Min buffer size: " << output_stream_info_.cbSize;
@@ -1787,7 +1783,7 @@
 }
 
 void DXVAVideoDecodeAccelerator::StopOnError(
-    media::VideoDecodeAccelerator::Error error) {
+    VideoDecodeAccelerator::Error error) {
   if (!main_thread_task_runner_->BelongsToCurrentThread()) {
     main_thread_task_runner_->PostTask(
         FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::StopOnError,
@@ -1900,8 +1896,7 @@
   if (GetState() != kUninitialized && client_) {
     // TODO(henryhsu): Use correct visible size instead of (0, 0). We can't use
     // coded size here so use (0, 0) intentionally to have the client choose.
-    media::Picture picture(picture_buffer_id, input_buffer_id, gfx::Rect(0, 0),
-                           false);
+    Picture picture(picture_buffer_id, input_buffer_id, gfx::Rect(0, 0), false);
     client_->PictureReady(picture);
   }
 }
@@ -2634,7 +2629,7 @@
 
 HRESULT DXVAVideoDecodeAccelerator::CheckConfigChanged(IMFSample* sample,
                                                        bool* config_changed) {
-  if (codec_ != media::kCodecH264)
+  if (codec_ != kCodecH264)
     return S_FALSE;
 
   base::win::ScopedComPtr<IMFMediaBuffer> buffer;
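
As context for the SPS/PPS parsing above, here is a simplified, self-contained
sketch of the config-change detection pattern: remember the last SPS/PPS
payloads and only report a change once an IDR slice confirms the new
configuration. The types are stand-ins, not the real H264ConfigChangeDetector.

#include <cstdint>
#include <vector>

struct NaluEvent {
  enum Type { kSPS, kPPS, kIDRSlice, kOther } type;
  std::vector<uint8_t> payload;  // Raw SPS/PPS bytes; empty for slices.
};

class ConfigChangeDetectorSketch {
 public:
  // Returns true if this event completes a configuration change.
  bool OnNalu(const NaluEvent& event) {
    switch (event.type) {
      case NaluEvent::kSPS:
        pending_change_ |= !last_sps_.empty() && event.payload != last_sps_;
        last_sps_ = event.payload;
        return false;
      case NaluEvent::kPPS:
        pending_change_ |= !last_pps_.empty() && event.payload != last_pps_;
        last_pps_ = event.payload;
        return false;
      case NaluEvent::kIDRSlice: {
        // A pending change only takes effect once an IDR slice starts the
        // newly configured stream.
        bool changed = pending_change_;
        pending_change_ = false;
        return changed;
      }
      default:
        return false;
    }
  }

 private:
  std::vector<uint8_t> last_sps_;
  std::vector<uint8_t> last_pps_;
  bool pending_change_ = false;
};
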
diff --git a/media/gpu/dxva_video_decode_accelerator_win.h b/media/gpu/dxva_video_decode_accelerator_win.h
index 932b2d9..54da96b4 100644
--- a/media/gpu/dxva_video_decode_accelerator_win.h
+++ b/media/gpu/dxva_video_decode_accelerator_win.h
@@ -85,7 +85,7 @@
   // we want to honor after we see an IDR slice.
   bool pending_config_changed_;
 
-  std::unique_ptr<media::H264Parser> parser_;
+  std::unique_ptr<H264Parser> parser_;
 
   DISALLOW_COPY_AND_ASSIGN(H264ConfigChangeDetector);
 };
@@ -95,7 +95,7 @@
 // This class lives on a single thread and DCHECKs that it is never accessed
 // from any other.
 class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
-    : public media::VideoDecodeAccelerator {
+    : public VideoDecodeAccelerator {
  public:
   enum State {
     kUninitialized,  // un-initialized.
@@ -113,11 +113,10 @@
       const gpu::GpuPreferences& gpu_preferences);
   ~DXVAVideoDecodeAccelerator() override;
 
-  // media::VideoDecodeAccelerator implementation.
+  // VideoDecodeAccelerator implementation.
   bool Initialize(const Config& config, Client* client) override;
-  void Decode(const media::BitstreamBuffer& bitstream_buffer) override;
-  void AssignPictureBuffers(
-      const std::vector<media::PictureBuffer>& buffers) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer) override;
+  void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
   void ReusePictureBuffer(int32_t picture_buffer_id) override;
   void Flush() override;
   void Reset() override;
@@ -128,8 +127,7 @@
       override;
   GLenum GetSurfaceInternalFormat() const override;
 
-  static media::VideoDecodeAccelerator::SupportedProfiles
-  GetSupportedProfiles();
+  static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
 
   // Preload dlls required for decoding.
   static void PreSandboxInitialization();
@@ -142,12 +140,10 @@
   typedef void* EGLSurface;
 
   // Returns the minimum resolution for the |profile| passed in.
-  static std::pair<int, int> GetMinResolution(
-      const media::VideoCodecProfile profile);
+  static std::pair<int, int> GetMinResolution(const VideoCodecProfile profile);
 
   // Returns the maximum resolution for the |profile| passed in.
-  static std::pair<int, int> GetMaxResolution(
-      const media::VideoCodecProfile profile);
+  static std::pair<int, int> GetMaxResolution(const VideoCodecProfile profile);
 
   // Returns the maximum resolution for H264 video.
   static std::pair<int, int> GetMaxH264Resolution();
@@ -169,7 +165,7 @@
   bool CreateDX11DevManager();
 
   // Creates, initializes and sets the media codec types for the decoder.
-  bool InitDecoder(media::VideoCodecProfile profile);
+  bool InitDecoder(VideoCodecProfile profile);
 
   // Validates whether the decoder supports hardware video acceleration.
   bool CheckDecoderDxvaSupport();
@@ -206,7 +202,7 @@
   void ProcessPendingSamples();
 
   // Helper function to notify the accelerator client about the error.
-  void StopOnError(media::VideoDecodeAccelerator::Error error);
+  void StopOnError(VideoDecodeAccelerator::Error error);
 
   // Transitions the decoder to the uninitialized state. The decoder will stop
   // accepting requests in this state.
@@ -345,7 +341,7 @@
   void ConfigChanged(const Config& config);
 
   // To expose client callbacks from VideoDecodeAccelerator.
-  media::VideoDecodeAccelerator::Client* client_;
+  VideoDecodeAccelerator::Client* client_;
 
   base::win::ScopedComPtr<IMFTransform> decoder_;
   base::win::ScopedComPtr<IMFTransform> video_format_converter_mft_;
@@ -434,7 +430,7 @@
   MakeGLContextCurrentCallback make_context_current_cb_;
 
   // Which codec we are decoding with hardware acceleration.
-  media::VideoCodec codec_;
+  VideoCodec codec_;
   // Thread on which the decoder operations like passing input frames,
   // getting output frames are performed. One instance of this thread
   // is created per decoder instance.
diff --git a/media/gpu/fake_video_decode_accelerator.cc b/media/gpu/fake_video_decode_accelerator.cc
index 7b13cb21..39cf919 100644
--- a/media/gpu/fake_video_decode_accelerator.cc
+++ b/media/gpu/fake_video_decode_accelerator.cc
@@ -28,7 +28,7 @@
 // Must also be an even number, as otherwise there won't be the same number
 // of white and black frames.
 static const unsigned int kNumBuffers =
-    media::limits::kMaxVideoFrames + (media::limits::kMaxVideoFrames & 1u);
+    limits::kMaxVideoFrames + (limits::kMaxVideoFrames & 1u);
 
 FakeVideoDecodeAccelerator::FakeVideoDecodeAccelerator(
     const gfx::Size& size,
@@ -45,7 +45,7 @@
 bool FakeVideoDecodeAccelerator::Initialize(const Config& config,
                                             Client* client) {
   DCHECK(child_task_runner_->BelongsToCurrentThread());
-  if (config.profile == media::VIDEO_CODEC_PROFILE_UNKNOWN) {
+  if (config.profile == VIDEO_CODEC_PROFILE_UNKNOWN) {
     LOG(ERROR) << "unknown codec profile";
     return false;
   }
@@ -63,7 +63,7 @@
 }
 
 void FakeVideoDecodeAccelerator::Decode(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   // We won't really read from the bitstream_buffer, so close the handle.
   if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
     base::SharedMemory::CloseHandle(bitstream_buffer.handle());
@@ -83,7 +83,7 @@
 
 // Similar to UseOutputBitstreamBuffer for the encode accelerator.
 void FakeVideoDecodeAccelerator::AssignPictureBuffers(
-    const std::vector<media::PictureBuffer>& buffers) {
+    const std::vector<PictureBuffer>& buffers) {
   DCHECK(buffers.size() == kNumBuffers);
   DCHECK(!(buffers.size() % 2));
 
@@ -169,8 +169,8 @@
     int buffer_id = free_output_buffers_.front();
     free_output_buffers_.pop();
 
-    const media::Picture picture = media::Picture(
-        buffer_id, bitstream_id, gfx::Rect(frame_buffer_size_), false);
+    const Picture picture =
+        Picture(buffer_id, bitstream_id, gfx::Rect(frame_buffer_size_), false);
     client_->PictureReady(picture);
     // Bitstream no longer needed.
     client_->NotifyEndOfBitstreamBuffer(bitstream_id);
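
A quick sketch of the rounding trick in kNumBuffers above: adding (n & 1)
bumps an odd count to the next even value and leaves an even count unchanged,
which guarantees equal numbers of white and black frames.

#include <cassert>

constexpr unsigned RoundUpToEven(unsigned n) {
  return n + (n & 1u);  // (n & 1) is 1 for odd n, 0 for even n.
}

int main() {
  static_assert(RoundUpToEven(4) == 4, "even input is unchanged");
  static_assert(RoundUpToEven(5) == 6, "odd input rounds up");
  assert(RoundUpToEven(0) == 0);
  return 0;
}
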
diff --git a/media/gpu/fake_video_decode_accelerator.h b/media/gpu/fake_video_decode_accelerator.h
index b8940527..61bb9be0 100644
--- a/media/gpu/fake_video_decode_accelerator.h
+++ b/media/gpu/fake_video_decode_accelerator.h
@@ -21,7 +21,7 @@
 namespace media {
 
 class MEDIA_GPU_EXPORT FakeVideoDecodeAccelerator
-    : public media::VideoDecodeAccelerator {
+    : public VideoDecodeAccelerator {
  public:
   FakeVideoDecodeAccelerator(
       const gfx::Size& size,
@@ -29,9 +29,8 @@
   ~FakeVideoDecodeAccelerator() override;
 
   bool Initialize(const Config& config, Client* client) override;
-  void Decode(const media::BitstreamBuffer& bitstream_buffer) override;
-  void AssignPictureBuffers(
-      const std::vector<media::PictureBuffer>& buffers) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer) override;
+  void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
   void ReusePictureBuffer(int32_t picture_buffer_id) override;
   void Flush() override;
   void Reset() override;
diff --git a/media/gpu/generic_v4l2_device.cc b/media/gpu/generic_v4l2_device.cc
index 57fe87a..ebdc867 100644
--- a/media/gpu/generic_v4l2_device.cc
+++ b/media/gpu/generic_v4l2_device.cc
@@ -232,10 +232,10 @@
     return EGL_NO_IMAGE_KHR;
   }
 
-  media::VideoPixelFormat vf_format = V4L2PixFmtToVideoPixelFormat(v4l2_pixfmt);
+  VideoPixelFormat vf_format = V4L2PixFmtToVideoPixelFormat(v4l2_pixfmt);
   // Number of components, as opposed to the number of V4L2 planes, which is
   // just a buffer count.
-  size_t num_planes = media::VideoFrame::NumPlanes(vf_format);
+  size_t num_planes = VideoFrame::NumPlanes(vf_format);
   DCHECK_LE(num_planes, 3u);
   if (num_planes < dmabuf_fds.size()) {
     // It's possible for more than one DRM plane to reside in one V4L2 plane,
@@ -266,15 +266,13 @@
     attrs.push_back(EGL_DMA_BUF_PLANE0_OFFSET_EXT + plane * 3);
     attrs.push_back(plane_offset);
     attrs.push_back(EGL_DMA_BUF_PLANE0_PITCH_EXT + plane * 3);
-    attrs.push_back(
-        media::VideoFrame::RowBytes(plane, vf_format, size.width()));
+    attrs.push_back(VideoFrame::RowBytes(plane, vf_format, size.width()));
 
     if (v4l2_plane + 1 < dmabuf_fds.size()) {
       ++v4l2_plane;
       plane_offset = 0;
     } else {
-      plane_offset +=
-          media::VideoFrame::PlaneSize(vf_format, plane, size).GetArea();
+      plane_offset += VideoFrame::PlaneSize(vf_format, plane, size).GetArea();
     }
   }
 
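A simplified stand-in for the plane-packing logic above: when a single dmabuf
carries more than one logical plane, later planes receive increasing offsets
within it; when each plane has its own fd, each offset restarts at zero. The
types and sizes here are illustrative, not the real VideoFrame API.

#include <cstddef>
#include <vector>

struct PlaneLayout {
  size_t fd_index;  // Which dmabuf the plane lives in.
  size_t offset;    // Byte offset of the plane within that dmabuf.
};

std::vector<PlaneLayout> PackPlanes(const std::vector<size_t>& plane_sizes,
                                    size_t num_fds) {
  std::vector<PlaneLayout> layout;
  size_t fd = 0;
  size_t offset = 0;
  for (size_t plane = 0; plane < plane_sizes.size(); ++plane) {
    layout.push_back({fd, offset});
    if (fd + 1 < num_fds) {
      ++fd;        // Next plane starts at the beginning of the next buffer.
      offset = 0;
    } else {
      offset += plane_sizes[plane];  // Remaining planes packed into the last.
    }
  }
  return layout;
}
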
diff --git a/media/gpu/gpu_video_decode_accelerator_factory_impl.cc b/media/gpu/gpu_video_decode_accelerator_factory_impl.cc
index b0dbf90..00c8aaf 100644
--- a/media/gpu/gpu_video_decode_accelerator_factory_impl.cc
+++ b/media/gpu/gpu_video_decode_accelerator_factory_impl.cc
@@ -65,7 +65,7 @@
 MEDIA_GPU_EXPORT gpu::VideoDecodeAcceleratorCapabilities
 GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
     const gpu::GpuPreferences& gpu_preferences) {
-  media::VideoDecodeAccelerator::Capabilities capabilities;
+  VideoDecodeAccelerator::Capabilities capabilities;
   if (gpu_preferences.disable_accelerated_video_decode)
     return gpu::VideoDecodeAcceleratorCapabilities();
 
@@ -80,18 +80,18 @@
   capabilities.supported_profiles =
       DXVAVideoDecodeAccelerator::GetSupportedProfiles();
 #elif defined(OS_CHROMEOS)
-  media::VideoDecodeAccelerator::SupportedProfiles vda_profiles;
+  VideoDecodeAccelerator::SupportedProfiles vda_profiles;
 #if defined(USE_V4L2_CODEC)
   vda_profiles = V4L2VideoDecodeAccelerator::GetSupportedProfiles();
-  media::GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
+  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
       vda_profiles, &capabilities.supported_profiles);
   vda_profiles = V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles();
-  media::GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
+  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
       vda_profiles, &capabilities.supported_profiles);
 #endif
 #if defined(ARCH_CPU_X86_FAMILY)
   vda_profiles = VaapiVideoDecodeAccelerator::GetSupportedProfiles();
-  media::GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
+  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
       vda_profiles, &capabilities.supported_profiles);
 #endif
 #elif defined(OS_MACOSX)
@@ -101,14 +101,14 @@
   capabilities =
       AndroidVideoDecodeAccelerator::GetCapabilities(gpu_preferences);
 #endif
-  return media::GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
+  return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
       capabilities);
 }
 
-MEDIA_GPU_EXPORT std::unique_ptr<media::VideoDecodeAccelerator>
+MEDIA_GPU_EXPORT std::unique_ptr<VideoDecodeAccelerator>
 GpuVideoDecodeAcceleratorFactoryImpl::CreateVDA(
-    media::VideoDecodeAccelerator::Client* client,
-    const media::VideoDecodeAccelerator::Config& config,
+    VideoDecodeAccelerator::Client* client,
+    const VideoDecodeAccelerator::Config& config,
     const gpu::GpuPreferences& gpu_preferences) {
   DCHECK(thread_checker_.CalledOnValidThread());
 
@@ -119,7 +119,7 @@
   // platform. This list is ordered by priority, from most to least preferred,
   // if applicable. This list must be in the same order as the querying order
   // in GetDecoderCapabilities() above.
-  using CreateVDAFp = std::unique_ptr<media::VideoDecodeAccelerator> (
+  using CreateVDAFp = std::unique_ptr<VideoDecodeAccelerator> (
       GpuVideoDecodeAcceleratorFactoryImpl::*)(const gpu::GpuPreferences&)
       const;
   const CreateVDAFp create_vda_fps[] = {
@@ -141,7 +141,7 @@
 #endif
   };
 
-  std::unique_ptr<media::VideoDecodeAccelerator> vda;
+  std::unique_ptr<VideoDecodeAccelerator> vda;
 
   for (const auto& create_vda_function : create_vda_fps) {
     vda = (this->*create_vda_function)(gpu_preferences);
@@ -153,10 +153,10 @@
 }
 
 #if defined(OS_WIN)
-std::unique_ptr<media::VideoDecodeAccelerator>
+std::unique_ptr<VideoDecodeAccelerator>
 GpuVideoDecodeAcceleratorFactoryImpl::CreateDXVAVDA(
     const gpu::GpuPreferences& gpu_preferences) const {
-  std::unique_ptr<media::VideoDecodeAccelerator> decoder;
+  std::unique_ptr<VideoDecodeAccelerator> decoder;
   if (base::win::GetVersion() >= base::win::VERSION_WIN7) {
     DVLOG(0) << "Initializing DXVA HW decoder for windows.";
     decoder.reset(new DXVAVideoDecodeAccelerator(
@@ -167,10 +167,10 @@
 #endif
 
 #if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
-std::unique_ptr<media::VideoDecodeAccelerator>
+std::unique_ptr<VideoDecodeAccelerator>
 GpuVideoDecodeAcceleratorFactoryImpl::CreateV4L2VDA(
     const gpu::GpuPreferences& gpu_preferences) const {
-  std::unique_ptr<media::VideoDecodeAccelerator> decoder;
+  std::unique_ptr<VideoDecodeAccelerator> decoder;
   scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
   if (device.get()) {
     decoder.reset(new V4L2VideoDecodeAccelerator(
@@ -180,10 +180,10 @@
   return decoder;
 }
 
-std::unique_ptr<media::VideoDecodeAccelerator>
+std::unique_ptr<VideoDecodeAccelerator>
 GpuVideoDecodeAcceleratorFactoryImpl::CreateV4L2SVDA(
     const gpu::GpuPreferences& gpu_preferences) const {
-  std::unique_ptr<media::VideoDecodeAccelerator> decoder;
+  std::unique_ptr<VideoDecodeAccelerator> decoder;
   scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
   if (device.get()) {
     decoder.reset(new V4L2SliceVideoDecodeAccelerator(
@@ -195,10 +195,10 @@
 #endif
 
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
-std::unique_ptr<media::VideoDecodeAccelerator>
+std::unique_ptr<VideoDecodeAccelerator>
 GpuVideoDecodeAcceleratorFactoryImpl::CreateVaapiVDA(
     const gpu::GpuPreferences& gpu_preferences) const {
-  std::unique_ptr<media::VideoDecodeAccelerator> decoder;
+  std::unique_ptr<VideoDecodeAccelerator> decoder;
   decoder.reset(new VaapiVideoDecodeAccelerator(make_context_current_cb_,
                                                 bind_image_cb_));
   return decoder;
@@ -206,10 +206,10 @@
 #endif
 
 #if defined(OS_MACOSX)
-std::unique_ptr<media::VideoDecodeAccelerator>
+std::unique_ptr<VideoDecodeAccelerator>
 GpuVideoDecodeAcceleratorFactoryImpl::CreateVTVDA(
     const gpu::GpuPreferences& gpu_preferences) const {
-  std::unique_ptr<media::VideoDecodeAccelerator> decoder;
+  std::unique_ptr<VideoDecodeAccelerator> decoder;
   decoder.reset(
       new VTVideoDecodeAccelerator(make_context_current_cb_, bind_image_cb_));
   return decoder;
@@ -217,10 +217,10 @@
 #endif
 
 #if defined(OS_ANDROID)
-std::unique_ptr<media::VideoDecodeAccelerator>
+std::unique_ptr<VideoDecodeAccelerator>
 GpuVideoDecodeAcceleratorFactoryImpl::CreateAndroidVDA(
     const gpu::GpuPreferences& gpu_preferences) const {
-  std::unique_ptr<media::VideoDecodeAccelerator> decoder;
+  std::unique_ptr<VideoDecodeAccelerator> decoder;
   decoder.reset(new AndroidVideoDecodeAccelerator(make_context_current_cb_,
                                                   get_gles2_decoder_cb_));
   return decoder;
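
The CreateVDA() changes above keep the existing pattern of walking a
priority-ordered list of member-function-pointer factories and returning the
first decoder that initializes. A minimal sketch of that pattern, using
placeholder types rather than the real media classes:

#include <memory>

struct Decoder {
  virtual ~Decoder() = default;
  virtual bool Initialize() = 0;
};

struct NullDecoder : Decoder {
  bool Initialize() override { return true; }
};

class Factory {
 public:
  using CreateFn = std::unique_ptr<Decoder> (Factory::*)() const;

  // Try each factory in priority order; the first decoder that constructs
  // and initializes successfully wins.
  std::unique_ptr<Decoder> CreateBestDecoder() const {
    const CreateFn create_fns[] = {&Factory::CreatePlatformDecoder,
                                   &Factory::CreateFallbackDecoder};
    for (CreateFn fn : create_fns) {
      std::unique_ptr<Decoder> decoder = (this->*fn)();
      if (decoder && decoder->Initialize())
        return decoder;
    }
    return nullptr;
  }

 private:
  // Stands in for a platform decoder that is unavailable on this system.
  std::unique_ptr<Decoder> CreatePlatformDecoder() const { return nullptr; }
  std::unique_ptr<Decoder> CreateFallbackDecoder() const {
    return std::make_unique<NullDecoder>();
  }
};
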
diff --git a/media/gpu/gpu_video_decode_accelerator_factory_impl.h b/media/gpu/gpu_video_decode_accelerator_factory_impl.h
index db383062..931d160 100644
--- a/media/gpu/gpu_video_decode_accelerator_factory_impl.h
+++ b/media/gpu/gpu_video_decode_accelerator_factory_impl.h
@@ -74,9 +74,9 @@
   static gpu::VideoDecodeAcceleratorCapabilities GetDecoderCapabilities(
       const gpu::GpuPreferences& gpu_preferences);
 
-  std::unique_ptr<media::VideoDecodeAccelerator> CreateVDA(
-      media::VideoDecodeAccelerator::Client* client,
-      const media::VideoDecodeAccelerator::Config& config,
+  std::unique_ptr<VideoDecodeAccelerator> CreateVDA(
+      VideoDecodeAccelerator::Client* client,
+      const VideoDecodeAccelerator::Config& config,
       const gpu::GpuPreferences& gpu_preferences);
 
  private:
@@ -87,25 +87,25 @@
       const GetGLES2DecoderCallback& get_gles2_decoder_cb);
 
 #if defined(OS_WIN)
-  std::unique_ptr<media::VideoDecodeAccelerator> CreateDXVAVDA(
+  std::unique_ptr<VideoDecodeAccelerator> CreateDXVAVDA(
       const gpu::GpuPreferences& gpu_preferences) const;
 #endif
 #if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
-  std::unique_ptr<media::VideoDecodeAccelerator> CreateV4L2VDA(
+  std::unique_ptr<VideoDecodeAccelerator> CreateV4L2VDA(
       const gpu::GpuPreferences& gpu_preferences) const;
-  std::unique_ptr<media::VideoDecodeAccelerator> CreateV4L2SVDA(
+  std::unique_ptr<VideoDecodeAccelerator> CreateV4L2SVDA(
       const gpu::GpuPreferences& gpu_preferences) const;
 #endif
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
-  std::unique_ptr<media::VideoDecodeAccelerator> CreateVaapiVDA(
+  std::unique_ptr<VideoDecodeAccelerator> CreateVaapiVDA(
       const gpu::GpuPreferences& gpu_preferences) const;
 #endif
 #if defined(OS_MACOSX)
-  std::unique_ptr<media::VideoDecodeAccelerator> CreateVTVDA(
+  std::unique_ptr<VideoDecodeAccelerator> CreateVTVDA(
       const gpu::GpuPreferences& gpu_preferences) const;
 #endif
 #if defined(OS_ANDROID)
-  std::unique_ptr<media::VideoDecodeAccelerator> CreateAndroidVDA(
+  std::unique_ptr<VideoDecodeAccelerator> CreateAndroidVDA(
       const gpu::GpuPreferences& gpu_preferences) const;
 #endif
 
diff --git a/media/gpu/h264_decoder.cc b/media/gpu/h264_decoder.cc
index 43689a4..e6192f92 100644
--- a/media/gpu/h264_decoder.cc
+++ b/media/gpu/h264_decoder.cc
@@ -63,15 +63,14 @@
     state_ = kAfterReset;
 }
 
-void H264Decoder::PrepareRefPicLists(const media::H264SliceHeader* slice_hdr) {
+void H264Decoder::PrepareRefPicLists(const H264SliceHeader* slice_hdr) {
   ConstructReferencePicListsP(slice_hdr);
   ConstructReferencePicListsB(slice_hdr);
 }
 
-bool H264Decoder::ModifyReferencePicLists(
-    const media::H264SliceHeader* slice_hdr,
-    H264Picture::Vector* ref_pic_list0,
-    H264Picture::Vector* ref_pic_list1) {
+bool H264Decoder::ModifyReferencePicLists(const H264SliceHeader* slice_hdr,
+                                          H264Picture::Vector* ref_pic_list0,
+                                          H264Picture::Vector* ref_pic_list1) {
   ref_pic_list0->clear();
   ref_pic_list1->clear();
 
@@ -109,7 +108,7 @@
   return CalculatePicOrderCounts(pic);
 }
 
-bool H264Decoder::InitCurrPicture(const media::H264SliceHeader* slice_hdr) {
+bool H264Decoder::InitCurrPicture(const H264SliceHeader* slice_hdr) {
   DCHECK(curr_pic_.get());
 
   curr_pic_->idr = slice_hdr->idr_pic_flag;
@@ -134,7 +133,7 @@
   curr_pic_->frame_num = curr_pic_->pic_num = slice_hdr->frame_num;
 
   DCHECK_NE(curr_sps_id_, -1);
-  const media::H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+  const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
   if (!sps)
     return false;
 
@@ -181,7 +180,7 @@
 }
 
 bool H264Decoder::CalculatePicOrderCounts(scoped_refptr<H264Picture> pic) {
-  const media::H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+  const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
   if (!sps)
     return false;
 
@@ -388,7 +387,7 @@
 };
 
 void H264Decoder::ConstructReferencePicListsP(
-    const media::H264SliceHeader* slice_hdr) {
+    const H264SliceHeader* slice_hdr) {
   // RefPicList0 (8.2.4.2.1) [[1] [2]], where:
   // [1] shortterm ref pics sorted by descending pic_num,
   // [2] longterm ref pics by ascending long_term_pic_num.
@@ -423,7 +422,7 @@
 };
 
 void H264Decoder::ConstructReferencePicListsB(
-    const media::H264SliceHeader* slice_hdr) {
+    const H264SliceHeader* slice_hdr) {
   // RefPicList0 (8.2.4.2.3) [[1] [2] [3]], where:
   // [1] shortterm ref pics with POC < curr_pic's POC sorted by descending POC,
   // [2] shortterm ref pics with POC > curr_pic's POC by ascending POC,
@@ -521,13 +520,12 @@
   (*v)[from] = pic;
 }
 
-bool H264Decoder::ModifyReferencePicList(
-    const media::H264SliceHeader* slice_hdr,
-    int list,
-    H264Picture::Vector* ref_pic_listx) {
+bool H264Decoder::ModifyReferencePicList(const H264SliceHeader* slice_hdr,
+                                         int list,
+                                         H264Picture::Vector* ref_pic_listx) {
   bool ref_pic_list_modification_flag_lX;
   int num_ref_idx_lX_active_minus1;
-  const media::H264ModificationOfPicNum* list_mod;
+  const H264ModificationOfPicNum* list_mod;
 
   // This can process either ref_pic_list0 or ref_pic_list1, depending on
   // the list argument. Set up pointers to proper list to be processed here.
@@ -562,7 +560,7 @@
   int pic_num_lx;
   bool done = false;
   scoped_refptr<H264Picture> pic;
-  for (int i = 0; i < media::H264SliceHeader::kRefListModSize && !done; ++i) {
+  for (int i = 0; i < H264SliceHeader::kRefListModSize && !done; ++i) {
     switch (list_mod->modification_of_pic_nums_idc) {
       case 0:
       case 1:
@@ -596,7 +594,7 @@
           pic_num_lx = pic_num_lx_no_wrap;
 
         DCHECK_LT(num_ref_idx_lX_active_minus1 + 1,
-                  media::H264SliceHeader::kRefListModSize);
+                  H264SliceHeader::kRefListModSize);
         pic = dpb_.GetShortRefPicByPicNum(pic_num_lx);
         if (!pic) {
           DVLOG(1) << "Malformed stream, no pic num " << pic_num_lx;
@@ -616,7 +614,7 @@
       case 2:
         // Modify long term reference picture position.
         DCHECK_LT(num_ref_idx_lX_active_minus1 + 1,
-                  media::H264SliceHeader::kRefListModSize);
+                  H264SliceHeader::kRefListModSize);
         pic = dpb_.GetLongRefPicByLongTermPicNum(list_mod->long_term_pic_num);
         if (!pic) {
           DVLOG(1) << "Malformed stream, no pic num "
@@ -708,18 +706,18 @@
   return true;
 }
 
-bool H264Decoder::StartNewFrame(const media::H264SliceHeader* slice_hdr) {
+bool H264Decoder::StartNewFrame(const H264SliceHeader* slice_hdr) {
   // TODO(posciak): add handling of max_num_ref_frames per spec.
   CHECK(curr_pic_.get());
   DCHECK(slice_hdr);
 
   curr_pps_id_ = slice_hdr->pic_parameter_set_id;
-  const media::H264PPS* pps = parser_.GetPPS(curr_pps_id_);
+  const H264PPS* pps = parser_.GetPPS(curr_pps_id_);
   if (!pps)
     return false;
 
   curr_sps_id_ = pps->seq_parameter_set_id;
-  const media::H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+  const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
   if (!sps)
     return false;
 
@@ -753,7 +751,7 @@
   // 8.2.5.4
   for (size_t i = 0; i < arraysize(pic->ref_pic_marking); ++i) {
     // Code below does not support interlaced stream (per-field pictures).
-    media::H264DecRefPicMarking* ref_pic_marking = &pic->ref_pic_marking[i];
+    H264DecRefPicMarking* ref_pic_marking = &pic->ref_pic_marking[i];
     scoped_refptr<H264Picture> to_mark;
     int pic_num_x;
 
@@ -892,7 +890,7 @@
 }
 
 bool H264Decoder::SlidingWindowPictureMarking() {
-  const media::H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+  const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
   if (!sps)
     return false;
 
@@ -1037,7 +1035,7 @@
   }
 }
 
-bool H264Decoder::UpdateMaxNumReorderFrames(const media::H264SPS* sps) {
+bool H264Decoder::UpdateMaxNumReorderFrames(const H264SPS* sps) {
   if (sps->vui_parameters_present_flag && sps->bitstream_restriction_flag) {
     max_num_reorder_frames_ =
         base::checked_cast<size_t>(sps->max_num_reorder_frames);
@@ -1077,7 +1075,7 @@
 bool H264Decoder::ProcessSPS(int sps_id, bool* need_new_buffers) {
   DVLOG(4) << "Processing SPS id:" << sps_id;
 
-  const media::H264SPS* sps = parser_.GetSPS(sps_id);
+  const H264SPS* sps = parser_.GetSPS(sps_id);
   if (!sps)
     return false;
 
@@ -1154,7 +1152,7 @@
 }
 
 bool H264Decoder::HandleFrameNumGap(int frame_num) {
-  const media::H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+  const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
   if (!sps)
     return false;
 
@@ -1186,7 +1184,7 @@
 }
 
 bool H264Decoder::IsNewPrimaryCodedPicture(
-    const media::H264SliceHeader* slice_hdr) const {
+    const H264SliceHeader* slice_hdr) const {
   if (!curr_pic_)
     return true;
 
@@ -1207,7 +1205,7 @@
         slice_hdr->first_mb_in_slice == 0)))
     return true;
 
-  const media::H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+  const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
   if (!sps)
     return false;
 
@@ -1228,7 +1226,7 @@
 }
 
 bool H264Decoder::PreprocessCurrentSlice() {
-  const media::H264SliceHeader* slice_hdr = curr_slice_hdr_.get();
+  const H264SliceHeader* slice_hdr = curr_slice_hdr_.get();
   DCHECK(slice_hdr);
 
   if (IsNewPrimaryCodedPicture(slice_hdr)) {
@@ -1263,7 +1261,7 @@
 bool H264Decoder::ProcessCurrentSlice() {
   DCHECK(curr_pic_);
 
-  const media::H264SliceHeader* slice_hdr = curr_slice_hdr_.get();
+  const H264SliceHeader* slice_hdr = curr_slice_hdr_.get();
   DCHECK(slice_hdr);
 
   if (slice_hdr->field_pic_flag == 0)
@@ -1275,7 +1273,7 @@
   if (!ModifyReferencePicLists(slice_hdr, &ref_pic_list0, &ref_pic_list1))
     return false;
 
-  const media::H264PPS* pps = parser_.GetPPS(curr_pps_id_);
+  const H264PPS* pps = parser_.GetPPS(curr_pps_id_);
   if (!pps)
     return false;
 
@@ -1309,26 +1307,27 @@
   }
 
   while (1) {
-    media::H264Parser::Result par_res;
+    H264Parser::Result par_res;
 
     if (!curr_nalu_) {
-      curr_nalu_.reset(new media::H264NALU());
+      curr_nalu_.reset(new H264NALU());
       par_res = parser_.AdvanceToNextNALU(curr_nalu_.get());
-      if (par_res == media::H264Parser::kEOStream)
+      if (par_res == H264Parser::kEOStream)
         return kRanOutOfStreamData;
-      else if (par_res != media::H264Parser::kOk)
+      else if (par_res != H264Parser::kOk)
         SET_ERROR_AND_RETURN();
 
       DVLOG(4) << "New NALU: " << static_cast<int>(curr_nalu_->nal_unit_type);
     }
 
     switch (curr_nalu_->nal_unit_type) {
-      case media::H264NALU::kNonIDRSlice:
+      case H264NALU::kNonIDRSlice:
         // We can't resume from a non-IDR slice.
         if (state_ != kDecoding)
           break;
-        // else fallthrough
-      case media::H264NALU::kIDRSlice: {
+
+      // else fallthrough
+      case H264NALU::kIDRSlice: {
         // TODO(posciak): the IDR may require an SPS that we don't have
         // available. For now we'd fail if that happens, but ideally we'd like
         // to keep going until the next SPS in the stream.
@@ -1341,10 +1340,10 @@
         state_ = kDecoding;
 
         if (!curr_slice_hdr_) {
-          curr_slice_hdr_.reset(new media::H264SliceHeader());
+          curr_slice_hdr_.reset(new H264SliceHeader());
           par_res =
               parser_.ParseSliceHeader(*curr_nalu_, curr_slice_hdr_.get());
-          if (par_res != media::H264Parser::kOk)
+          if (par_res != H264Parser::kOk)
             SET_ERROR_AND_RETURN();
 
           if (!PreprocessCurrentSlice())
@@ -1369,14 +1368,14 @@
         break;
       }
 
-      case media::H264NALU::kSPS: {
+      case H264NALU::kSPS: {
         int sps_id;
 
         if (!FinishPrevFrameIfPresent())
           SET_ERROR_AND_RETURN();
 
         par_res = parser_.ParseSPS(&sps_id);
-        if (par_res != media::H264Parser::kOk)
+        if (par_res != H264Parser::kOk)
           SET_ERROR_AND_RETURN();
 
         bool need_new_buffers = false;
@@ -1401,22 +1400,22 @@
         break;
       }
 
-      case media::H264NALU::kPPS: {
+      case H264NALU::kPPS: {
         int pps_id;
 
         if (!FinishPrevFrameIfPresent())
           SET_ERROR_AND_RETURN();
 
         par_res = parser_.ParsePPS(&pps_id);
-        if (par_res != media::H264Parser::kOk)
+        if (par_res != H264Parser::kOk)
           SET_ERROR_AND_RETURN();
 
         break;
       }
 
-      case media::H264NALU::kAUD:
-      case media::H264NALU::kEOSeq:
-      case media::H264NALU::kEOStream:
+      case H264NALU::kAUD:
+      case H264NALU::kEOSeq:
+      case H264NALU::kEOStream:
         if (state_ != kDecoding)
           break;
 
diff --git a/media/gpu/h264_decoder.h b/media/gpu/h264_decoder.h
index fc0e6f8..1caf662 100644
--- a/media/gpu/h264_decoder.h
+++ b/media/gpu/h264_decoder.h
@@ -53,8 +53,8 @@
     // is expected to follow this call with one or more SubmitSlice() calls
     // before calling SubmitDecode().
     // Return true if successful.
-    virtual bool SubmitFrameMetadata(const media::H264SPS* sps,
-                                     const media::H264PPS* pps,
+    virtual bool SubmitFrameMetadata(const H264SPS* sps,
+                                     const H264PPS* pps,
                                      const H264DPB& dpb,
                                      const H264Picture::Vector& ref_pic_listp0,
                                      const H264Picture::Vector& ref_pic_listb0,
@@ -70,8 +70,8 @@
     // This must be called one or more times per frame, before SubmitDecode().
     // Note that |data| does not have to remain valid after this call returns.
     // Return true if successful.
-    virtual bool SubmitSlice(const media::H264PPS* pps,
-                             const media::H264SliceHeader* slice_hdr,
+    virtual bool SubmitSlice(const H264PPS* pps,
+                             const H264SliceHeader* slice_hdr,
                              const H264Picture::Vector& ref_pic_list0,
                              const H264Picture::Vector& ref_pic_list1,
                              const scoped_refptr<H264Picture>& pic,
@@ -103,7 +103,7 @@
   H264Decoder(H264Accelerator* accelerator);
   ~H264Decoder() override;
 
-  // media::AcceleratedVideoDecoder implementation.
+  // AcceleratedVideoDecoder implementation.
   bool Flush() override WARN_UNUSED_RESULT;
   void Reset() override;
   void SetStream(const uint8_t* ptr, size_t size) override;
@@ -119,7 +119,7 @@
   // displaying and giving them back. +2 instead of +1 because of subjective
   // smoothness improvement during testing.
   enum {
-    kPicsInPipeline = media::limits::kMaxVideoFrames + 2,
+    kPicsInPipeline = limits::kMaxVideoFrames + 2,
     kMaxNumReqPictures = H264DPB::kDPBMaxSize + kPicsInPipeline,
   };
 
@@ -140,10 +140,10 @@
   bool ProcessCurrentSlice();
 
   // Return true if we need to start a new picture.
-  bool IsNewPrimaryCodedPicture(const media::H264SliceHeader* slice_hdr) const;
+  bool IsNewPrimaryCodedPicture(const H264SliceHeader* slice_hdr) const;
 
   // Initialize the current picture according to data in |slice_hdr|.
-  bool InitCurrPicture(const media::H264SliceHeader* slice_hdr);
+  bool InitCurrPicture(const H264SliceHeader* slice_hdr);
 
   // Initialize |pic| as a "non-existing" picture (see spec) with |frame_num|,
   // to be used for frame gap concealment.
@@ -157,19 +157,19 @@
   // a picture with |frame_num|.
   void UpdatePicNums(int frame_num);
 
-  bool UpdateMaxNumReorderFrames(const media::H264SPS* sps);
+  bool UpdateMaxNumReorderFrames(const H264SPS* sps);
 
   // Prepare reference picture lists for the current frame.
-  void PrepareRefPicLists(const media::H264SliceHeader* slice_hdr);
+  void PrepareRefPicLists(const H264SliceHeader* slice_hdr);
   // Prepare reference picture lists for the given slice.
-  bool ModifyReferencePicLists(const media::H264SliceHeader* slice_hdr,
+  bool ModifyReferencePicLists(const H264SliceHeader* slice_hdr,
                                H264Picture::Vector* ref_pic_list0,
                                H264Picture::Vector* ref_pic_list1);
 
   // Construct initial reference picture lists for use in decoding of
   // P and B pictures (see 8.2.4 in spec).
-  void ConstructReferencePicListsP(const media::H264SliceHeader* slice_hdr);
-  void ConstructReferencePicListsB(const media::H264SliceHeader* slice_hdr);
+  void ConstructReferencePicListsP(const H264SliceHeader* slice_hdr);
+  void ConstructReferencePicListsB(const H264SliceHeader* slice_hdr);
 
   // Helper functions for reference list construction, per spec.
   int PicNumF(const scoped_refptr<H264Picture>& pic);
@@ -179,7 +179,7 @@
   // specified in spec (8.2.4).
   //
   // |list| indicates list number and should be either 0 or 1.
-  bool ModifyReferencePicList(const media::H264SliceHeader* slice_hdr,
+  bool ModifyReferencePicList(const H264SliceHeader* slice_hdr,
                               int list,
                               H264Picture::Vector* ref_pic_listx);
 
@@ -195,7 +195,7 @@
   bool HandleFrameNumGap(int frame_num);
 
   // Start processing a new frame.
-  bool StartNewFrame(const media::H264SliceHeader* slice_hdr);
+  bool StartNewFrame(const H264SliceHeader* slice_hdr);
 
   // All data for a frame has been received; process it and decode.
   bool FinishPrevFrameIfPresent();
@@ -225,7 +225,7 @@
   State state_;
 
   // Parser in use.
-  media::H264Parser parser_;
+  H264Parser parser_;
 
   // DPB in use.
   H264DPB dpb_;
@@ -261,8 +261,8 @@
   int curr_pps_id_;
 
   // Current NALU and slice header being processed.
-  std::unique_ptr<media::H264NALU> curr_nalu_;
-  std::unique_ptr<media::H264SliceHeader> curr_slice_hdr_;
+  std::unique_ptr<H264NALU> curr_nalu_;
+  std::unique_ptr<H264SliceHeader> curr_slice_hdr_;
 
   // Output picture size.
   gfx::Size pic_size_;
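
For reference, the B-slice RefPicList0 ordering that
ConstructReferencePicListsB() implements (spec 8.2.4.2.3) can be sketched with
a bare-bones picture type; this is an illustration, not the real H264Picture
code, and it omits the long-term reference pictures that follow in part [3].

#include <algorithm>
#include <vector>

struct Pic {
  int poc;  // Picture order count.
};

std::vector<Pic> BuildRefPicList0B(const std::vector<Pic>& short_term_refs,
                                   int curr_poc) {
  std::vector<Pic> before, after;
  for (const Pic& pic : short_term_refs)
    (pic.poc < curr_poc ? before : after).push_back(pic);
  // [1] POC < current, descending POC (closest past pictures first).
  std::sort(before.begin(), before.end(),
            [](const Pic& a, const Pic& b) { return a.poc > b.poc; });
  // [2] POC > current, ascending POC (closest future pictures first).
  std::sort(after.begin(), after.end(),
            [](const Pic& a, const Pic& b) { return a.poc < b.poc; });
  before.insert(before.end(), after.begin(), after.end());
  return before;
}
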
diff --git a/media/gpu/h264_dpb.cc b/media/gpu/h264_dpb.cc
index ea1c4db8..150bd31f 100644
--- a/media/gpu/h264_dpb.cc
+++ b/media/gpu/h264_dpb.cc
@@ -28,7 +28,7 @@
       frame_num_offset(0),
       frame_num_wrap(0),
       long_term_frame_idx(0),
-      type(media::H264SliceHeader::kPSlice),
+      type(H264SliceHeader::kPSlice),
       nal_ref_idc(0),
       idr(false),
       idr_pic_id(0),
diff --git a/media/gpu/h264_dpb.h b/media/gpu/h264_dpb.h
index 7e473de..aef39a1b 100644
--- a/media/gpu/h264_dpb.h
+++ b/media/gpu/h264_dpb.h
@@ -58,7 +58,7 @@
   int frame_num_wrap;
   int long_term_frame_idx;
 
-  media::H264SliceHeader::Type type;
+  H264SliceHeader::Type type;
   int nal_ref_idc;
   bool idr;        // IDR picture?
   int idr_pic_id;  // Valid only if idr == true.
@@ -79,8 +79,7 @@
   // memory management after finishing this picture.
   bool long_term_reference_flag;
   bool adaptive_ref_pic_marking_mode_flag;
-  media::H264DecRefPicMarking
-      ref_pic_marking[media::H264SliceHeader::kRefListSize];
+  H264DecRefPicMarking ref_pic_marking[H264SliceHeader::kRefListSize];
 
   // Position in DPB (i.e. index in DPB).
   int dpb_position;
diff --git a/media/gpu/ipc/common/create_video_encoder_params.cc b/media/gpu/ipc/common/create_video_encoder_params.cc
index 790e8d3..11fe006 100644
--- a/media/gpu/ipc/common/create_video_encoder_params.cc
+++ b/media/gpu/ipc/common/create_video_encoder_params.cc
@@ -9,8 +9,8 @@
 namespace media {
 
 CreateVideoEncoderParams::CreateVideoEncoderParams()
-    : input_format(media::PIXEL_FORMAT_UNKNOWN),
-      output_profile(media::VIDEO_CODEC_PROFILE_UNKNOWN),
+    : input_format(PIXEL_FORMAT_UNKNOWN),
+      output_profile(VIDEO_CODEC_PROFILE_UNKNOWN),
       initial_bitrate(0),
       encoder_route_id(MSG_ROUTING_NONE) {}
 
diff --git a/media/gpu/ipc/common/create_video_encoder_params.h b/media/gpu/ipc/common/create_video_encoder_params.h
index b22203c..995a6d9 100644
--- a/media/gpu/ipc/common/create_video_encoder_params.h
+++ b/media/gpu/ipc/common/create_video_encoder_params.h
@@ -14,9 +14,9 @@
 struct CreateVideoEncoderParams {
   CreateVideoEncoderParams();
   ~CreateVideoEncoderParams();
-  media::VideoPixelFormat input_format;
+  VideoPixelFormat input_format;
   gfx::Size input_visible_size;
-  media::VideoCodecProfile output_profile;
+  VideoCodecProfile output_profile;
   uint32_t initial_bitrate;
   int32_t encoder_route_id;
 };
diff --git a/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc b/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc
index bffc7fb..3e2071f6 100644
--- a/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc
+++ b/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc
@@ -72,47 +72,45 @@
 
 namespace media {
 
-class GpuJpegDecodeAccelerator::Client
-    : public media::JpegDecodeAccelerator::Client,
-      public base::NonThreadSafe {
+class GpuJpegDecodeAccelerator::Client : public JpegDecodeAccelerator::Client,
+                                         public base::NonThreadSafe {
  public:
-  Client(media::GpuJpegDecodeAccelerator* owner, int32_t route_id)
+  Client(GpuJpegDecodeAccelerator* owner, int32_t route_id)
       : owner_(owner->AsWeakPtr()), route_id_(route_id) {}
 
   ~Client() override { DCHECK(CalledOnValidThread()); }
 
-  // media::JpegDecodeAccelerator::Client implementation.
+  // JpegDecodeAccelerator::Client implementation.
   void VideoFrameReady(int32_t bitstream_buffer_id) override {
     DCHECK(CalledOnValidThread());
     if (owner_)
       owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id,
-                                 media::JpegDecodeAccelerator::NO_ERRORS);
+                                 JpegDecodeAccelerator::NO_ERRORS);
   }
 
   void NotifyError(int32_t bitstream_buffer_id,
-                   media::JpegDecodeAccelerator::Error error) override {
+                   JpegDecodeAccelerator::Error error) override {
     DCHECK(CalledOnValidThread());
     if (owner_)
       owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id, error);
   }
 
-  void Decode(const media::BitstreamBuffer& bitstream_buffer,
-              const scoped_refptr<media::VideoFrame>& video_frame) {
+  void Decode(const BitstreamBuffer& bitstream_buffer,
+              const scoped_refptr<VideoFrame>& video_frame) {
     DCHECK(CalledOnValidThread());
     DCHECK(accelerator_);
     accelerator_->Decode(bitstream_buffer, video_frame);
   }
 
-  void set_accelerator(
-      std::unique_ptr<media::JpegDecodeAccelerator> accelerator) {
+  void set_accelerator(std::unique_ptr<JpegDecodeAccelerator> accelerator) {
     DCHECK(CalledOnValidThread());
     accelerator_ = std::move(accelerator);
   }
 
  private:
-  base::WeakPtr<media::GpuJpegDecodeAccelerator> owner_;
+  base::WeakPtr<GpuJpegDecodeAccelerator> owner_;
   int32_t route_id_;
-  std::unique_ptr<media::JpegDecodeAccelerator> accelerator_;
+  std::unique_ptr<JpegDecodeAccelerator> accelerator_;
 };
 
 // Create, destroy, and RemoveClient run on child thread. All other methods run
@@ -185,7 +183,7 @@
 
   void NotifyDecodeStatusOnIOThread(int32_t route_id,
                                     int32_t buffer_id,
-                                    media::JpegDecodeAccelerator::Error error) {
+                                    JpegDecodeAccelerator::Error error) {
     DCHECK(io_task_runner_->BelongsToCurrentThread());
     SendOnIOThread(new AcceleratedJpegDecoderHostMsg_DecodeAck(
         route_id, buffer_id, error));
@@ -199,9 +197,8 @@
     TRACE_EVENT0("jpeg", "GpuJpegDecodeAccelerator::MessageFilter::OnDecode");
 
     if (!VerifyDecodeParams(params)) {
-      NotifyDecodeStatusOnIOThread(
-          *route_id, params.input_buffer.id(),
-          media::JpegDecodeAccelerator::INVALID_ARGUMENT);
+      NotifyDecodeStatusOnIOThread(*route_id, params.input_buffer.id(),
+                                   JpegDecodeAccelerator::INVALID_ARGUMENT);
       if (base::SharedMemory::IsHandleValid(params.output_video_frame_handle))
         base::SharedMemory::CloseHandle(params.output_video_frame_handle);
       return;
@@ -215,31 +212,28 @@
     if (!output_shm->Map(params.output_buffer_size)) {
       LOG(ERROR) << "Could not map output shared memory for input buffer id "
                  << params.input_buffer.id();
-      NotifyDecodeStatusOnIOThread(
-          *route_id, params.input_buffer.id(),
-          media::JpegDecodeAccelerator::PLATFORM_FAILURE);
+      NotifyDecodeStatusOnIOThread(*route_id, params.input_buffer.id(),
+                                   JpegDecodeAccelerator::PLATFORM_FAILURE);
       base::SharedMemory::CloseHandle(params.input_buffer.handle());
       return;
     }
 
     uint8_t* shm_memory = static_cast<uint8_t*>(output_shm->memory());
-    scoped_refptr<media::VideoFrame> frame =
-        media::VideoFrame::WrapExternalSharedMemory(
-            media::PIXEL_FORMAT_I420,          // format
-            params.coded_size,                 // coded_size
-            gfx::Rect(params.coded_size),      // visible_rect
-            params.coded_size,                 // natural_size
-            shm_memory,                        // data
-            params.output_buffer_size,         // data_size
-            params.output_video_frame_handle,  // handle
-            0,                                 // data_offset
-            base::TimeDelta());                // timestamp
+    scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalSharedMemory(
+        PIXEL_FORMAT_I420,                 // format
+        params.coded_size,                 // coded_size
+        gfx::Rect(params.coded_size),      // visible_rect
+        params.coded_size,                 // natural_size
+        shm_memory,                        // data
+        params.output_buffer_size,         // data_size
+        params.output_video_frame_handle,  // handle
+        0,                                 // data_offset
+        base::TimeDelta());                // timestamp
     if (!frame.get()) {
       LOG(ERROR) << "Could not create VideoFrame for input buffer id "
                  << params.input_buffer.id();
-      NotifyDecodeStatusOnIOThread(
-          *route_id, params.input_buffer.id(),
-          media::JpegDecodeAccelerator::PLATFORM_FAILURE);
+      NotifyDecodeStatusOnIOThread(*route_id, params.input_buffer.id(),
+                                   JpegDecodeAccelerator::PLATFORM_FAILURE);
       base::SharedMemory::CloseHandle(params.input_buffer.handle());
       return;
     }
@@ -324,9 +318,9 @@
   };
 
   std::unique_ptr<Client> client(new Client(this, route_id));
-  std::unique_ptr<media::JpegDecodeAccelerator> accelerator;
+  std::unique_ptr<JpegDecodeAccelerator> accelerator;
   for (const auto& create_jda_function : create_jda_fps) {
-    std::unique_ptr<media::JpegDecodeAccelerator> tmp_accelerator =
+    std::unique_ptr<JpegDecodeAccelerator> tmp_accelerator =
         (*create_jda_function)(io_task_runner_);
     if (tmp_accelerator && tmp_accelerator->Initialize(client.get())) {
       accelerator = std::move(tmp_accelerator);
@@ -363,7 +357,7 @@
 void GpuJpegDecodeAccelerator::NotifyDecodeStatus(
     int32_t route_id,
     int32_t buffer_id,
-    media::JpegDecodeAccelerator::Error error) {
+    JpegDecodeAccelerator::Error error) {
   DCHECK(CalledOnValidThread());
   Send(new AcceleratedJpegDecoderHostMsg_DecodeAck(route_id, buffer_id, error));
 }
@@ -384,10 +378,9 @@
 }
 
 // static
-std::unique_ptr<media::JpegDecodeAccelerator>
-GpuJpegDecodeAccelerator::CreateV4L2JDA(
+std::unique_ptr<JpegDecodeAccelerator> GpuJpegDecodeAccelerator::CreateV4L2JDA(
     const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner) {
-  std::unique_ptr<media::JpegDecodeAccelerator> decoder;
+  std::unique_ptr<JpegDecodeAccelerator> decoder;
 #if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
   scoped_refptr<V4L2Device> device =
       V4L2Device::Create(V4L2Device::kJpegDecoder);
@@ -398,10 +391,9 @@
 }
 
 // static
-std::unique_ptr<media::JpegDecodeAccelerator>
-GpuJpegDecodeAccelerator::CreateVaapiJDA(
+std::unique_ptr<JpegDecodeAccelerator> GpuJpegDecodeAccelerator::CreateVaapiJDA(
     const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner) {
-  std::unique_ptr<media::JpegDecodeAccelerator> decoder;
+  std::unique_ptr<JpegDecodeAccelerator> decoder;
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
   decoder.reset(new VaapiJpegDecodeAccelerator(io_task_runner));
 #endif
@@ -415,7 +407,7 @@
       &GpuJpegDecodeAccelerator::CreateVaapiJDA,
   };
   for (const auto& create_jda_function : create_jda_fps) {
-    std::unique_ptr<media::JpegDecodeAccelerator> accelerator =
+    std::unique_ptr<JpegDecodeAccelerator> accelerator =
         (*create_jda_function)(base::ThreadTaskRunnerHandle::Get());
     if (accelerator && accelerator->IsSupported())
       return true;
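
The file above selects a JPEG decode accelerator by walking an ordered list of factory function pointers (CreateJDAFp) until one both constructs and initializes; IsSupported() reuses the same list. A minimal standalone sketch of that selection pattern, with illustrative stand-in types rather than Chromium's real classes:

#include <memory>
#include <vector>

// Illustrative stand-in for the JpegDecodeAccelerator interface.
struct Accelerator {
  virtual ~Accelerator() = default;
  virtual bool Initialize() = 0;  // false if unsupported on this machine.
};

struct V4L2Accel : Accelerator {
  bool Initialize() override { return false; }  // e.g. no V4L2 device found
};
struct VaapiAccel : Accelerator {
  bool Initialize() override { return true; }
};

using CreateFn = std::unique_ptr<Accelerator> (*)();

std::unique_ptr<Accelerator> CreateV4L2() { return std::make_unique<V4L2Accel>(); }
std::unique_ptr<Accelerator> CreateVaapi() { return std::make_unique<VaapiAccel>(); }

// Walk the candidates in priority order; the first one that initializes wins.
std::unique_ptr<Accelerator> CreateFirstWorking() {
  const std::vector<CreateFn> candidates = {&CreateV4L2, &CreateVaapi};
  for (CreateFn create : candidates) {
    std::unique_ptr<Accelerator> accel = (*create)();
    if (accel && accel->Initialize())
      return accel;
  }
  return nullptr;  // No hardware path available.
}

The same shape appears again below for video encoders, where CreateVEAFps() assembles the per-platform candidates behind #if guards.
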
diff --git a/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h b/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h
index 0de3033..500daea 100644
--- a/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h
+++ b/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h
@@ -42,7 +42,7 @@
 
   void NotifyDecodeStatus(int32_t route_id,
                           int32_t bitstream_buffer_id,
-                          media::JpegDecodeAccelerator::Error error);
+                          JpegDecodeAccelerator::Error error);
 
   // Function to delegate sending to actual sender.
   bool Send(IPC::Message* message) override;
@@ -52,7 +52,7 @@
   static bool IsSupported();
 
  private:
-  using CreateJDAFp = std::unique_ptr<media::JpegDecodeAccelerator> (*)(
+  using CreateJDAFp = std::unique_ptr<JpegDecodeAccelerator> (*)(
       const scoped_refptr<base::SingleThreadTaskRunner>&);
 
   class Client;
@@ -60,9 +60,9 @@
 
   void ClientRemoved();
 
-  static std::unique_ptr<media::JpegDecodeAccelerator> CreateV4L2JDA(
+  static std::unique_ptr<JpegDecodeAccelerator> CreateV4L2JDA(
       const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
-  static std::unique_ptr<media::JpegDecodeAccelerator> CreateVaapiJDA(
+  static std::unique_ptr<JpegDecodeAccelerator> CreateVaapiJDA(
       const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
 
   // The lifetime of objects of this class is managed by a gpu::GpuChannel. The
diff --git a/media/gpu/ipc/service/gpu_video_decode_accelerator.cc b/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
index 280cce3..9504b48 100644
--- a/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
+++ b/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
@@ -223,10 +223,10 @@
     uint32_t textures_per_buffer,
     const gfx::Size& dimensions,
     uint32_t texture_target) {
-  if (dimensions.width() > media::limits::kMaxDimension ||
-      dimensions.height() > media::limits::kMaxDimension ||
-      dimensions.GetArea() > media::limits::kMaxCanvas) {
-    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
+  if (dimensions.width() > limits::kMaxDimension ||
+      dimensions.height() > limits::kMaxDimension ||
+      dimensions.GetArea() > limits::kMaxCanvas) {
+    NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
     return;
   }
   if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
@@ -252,7 +252,7 @@
   uncleared_textures_.erase(picture_buffer_id);
 }
 
-void GpuVideoDecodeAccelerator::PictureReady(const media::Picture& picture) {
+void GpuVideoDecodeAccelerator::PictureReady(const Picture& picture) {
   // VDA may call PictureReady on the IO thread. SetTextureCleared should run
   // on the child thread. VDA is responsible for calling PictureReady on the
   // child thread when a picture buffer is delivered for the first time.
@@ -293,7 +293,7 @@
 }
 
 void GpuVideoDecodeAccelerator::NotifyError(
-    media::VideoDecodeAccelerator::Error error) {
+    VideoDecodeAccelerator::Error error) {
   if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(host_route_id_,
                                                                  error))) {
     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
@@ -330,7 +330,7 @@
 }
 
 bool GpuVideoDecodeAccelerator::Initialize(
-    const media::VideoDecodeAccelerator::Config& config) {
+    const VideoDecodeAccelerator::Config& config) {
   DCHECK(!video_decode_accelerator_);
 
   if (!stub_->channel()->AddRoute(host_route_id_, stub_->stream_id(), this)) {
@@ -379,16 +379,16 @@
 // Runs on IO thread if VDA::TryToSetupDecodeOnSeparateThread() succeeded,
 // otherwise on the main thread.
 void GpuVideoDecodeAccelerator::OnDecode(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   DCHECK(video_decode_accelerator_);
   video_decode_accelerator_->Decode(bitstream_buffer);
 }
 
 void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
     const std::vector<int32_t>& buffer_ids,
-    const std::vector<media::PictureBuffer::TextureIds>& texture_ids) {
+    const std::vector<PictureBuffer::TextureIds>& texture_ids) {
   if (buffer_ids.size() != texture_ids.size()) {
-    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
+    NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
     return;
   }
 
@@ -396,22 +396,22 @@
   gpu::gles2::TextureManager* texture_manager =
       command_decoder->GetContextGroup()->texture_manager();
 
-  std::vector<media::PictureBuffer> buffers;
+  std::vector<PictureBuffer> buffers;
   std::vector<std::vector<scoped_refptr<gpu::gles2::TextureRef>>> textures;
   for (uint32_t i = 0; i < buffer_ids.size(); ++i) {
     if (buffer_ids[i] < 0) {
       DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
-      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
+      NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
       return;
     }
     std::vector<scoped_refptr<gpu::gles2::TextureRef>> current_textures;
-    media::PictureBuffer::TextureIds buffer_texture_ids = texture_ids[i];
-    media::PictureBuffer::TextureIds service_ids;
+    PictureBuffer::TextureIds buffer_texture_ids = texture_ids[i];
+    PictureBuffer::TextureIds service_ids;
     if (buffer_texture_ids.size() != textures_per_buffer_) {
       DLOG(ERROR) << "Requested " << textures_per_buffer_
                   << " textures per picture buffer, got "
                   << buffer_texture_ids.size();
-      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
+      NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
       return;
     }
     for (size_t j = 0; j < textures_per_buffer_; j++) {
@@ -419,14 +419,14 @@
           texture_manager->GetTexture(buffer_texture_ids[j]);
       if (!texture_ref) {
         DLOG(ERROR) << "Failed to find texture id " << buffer_texture_ids[j];
-        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
+        NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
         return;
       }
       gpu::gles2::Texture* info = texture_ref->texture();
       if (info->target() != texture_target_) {
         DLOG(ERROR) << "Texture target mismatch for texture id "
                     << buffer_texture_ids[j];
-        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
+        NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
         return;
       }
       if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
@@ -446,7 +446,7 @@
             height != texture_dimensions_.height()) {
           DLOG(ERROR) << "Size mismatch for texture id "
                       << buffer_texture_ids[j];
-          NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
+          NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
           return;
         }
 
@@ -463,8 +463,8 @@
       current_textures.push_back(texture_ref);
     }
     textures.push_back(current_textures);
-    buffers.push_back(media::PictureBuffer(buffer_ids[i], texture_dimensions_,
-                                           service_ids, buffer_texture_ids));
+    buffers.push_back(PictureBuffer(buffer_ids[i], texture_dimensions_,
+                                    service_ids, buffer_texture_ids));
   }
   {
     DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
@@ -501,8 +501,7 @@
   filter_removed_.Signal();
 }
 
-void GpuVideoDecodeAccelerator::SetTextureCleared(
-    const media::Picture& picture) {
+void GpuVideoDecodeAccelerator::SetTextureCleared(const Picture& picture) {
   DCHECK(child_task_runner_->BelongsToCurrentThread());
   DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
   auto it = uncleared_textures_.find(picture.picture_buffer_id());
diff --git a/media/gpu/ipc/service/gpu_video_decode_accelerator.h b/media/gpu/ipc/service/gpu_video_decode_accelerator.h
index 6653517..84414ce1 100644
--- a/media/gpu/ipc/service/gpu_video_decode_accelerator.h
+++ b/media/gpu/ipc/service/gpu_video_decode_accelerator.h
@@ -34,7 +34,7 @@
 class GpuVideoDecodeAccelerator
     : public IPC::Listener,
       public IPC::Sender,
-      public media::VideoDecodeAccelerator::Client,
+      public VideoDecodeAccelerator::Client,
       public gpu::GpuCommandBufferStub::DestructionObserver {
  public:
   // Each of the arguments to the constructor must outlive this object.
@@ -54,7 +54,7 @@
   // IPC::Listener implementation.
   bool OnMessageReceived(const IPC::Message& message) override;
 
-  // media::VideoDecodeAccelerator::Client implementation.
+  // VideoDecodeAccelerator::Client implementation.
   void NotifyInitializationComplete(bool success) override;
   void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
                              VideoPixelFormat format,
@@ -62,11 +62,11 @@
                              const gfx::Size& dimensions,
                              uint32_t texture_target) override;
   void DismissPictureBuffer(int32_t picture_buffer_id) override;
-  void PictureReady(const media::Picture& picture) override;
+  void PictureReady(const Picture& picture) override;
   void NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) override;
   void NotifyFlushDone() override;
   void NotifyResetDone() override;
-  void NotifyError(media::VideoDecodeAccelerator::Error error) override;
+  void NotifyError(VideoDecodeAccelerator::Error error) override;
 
   // GpuCommandBufferStub::DestructionObserver implementation.
   void OnWillDestroyStub() override;
@@ -78,7 +78,7 @@
   // one of them succeeds for given |config|. Send the |init_done_msg| when
   // done. filter_ is passed to gpu::GpuCommandBufferStub channel only if the
   // chosen VDA can decode on IO thread.
-  bool Initialize(const media::VideoDecodeAccelerator::Config& config);
+  bool Initialize(const VideoDecodeAccelerator::Config& config);
 
  private:
   class MessageFilter;
@@ -87,10 +87,10 @@
   ~GpuVideoDecodeAccelerator() override;
 
   // Handlers for IPC messages.
-  void OnDecode(const media::BitstreamBuffer& bitstream_buffer);
+  void OnDecode(const BitstreamBuffer& bitstream_buffer);
   void OnAssignPictureBuffers(
       const std::vector<int32_t>& buffer_ids,
-      const std::vector<media::PictureBuffer::TextureIds>& texture_ids);
+      const std::vector<PictureBuffer::TextureIds>& texture_ids);
   void OnReusePictureBuffer(int32_t picture_buffer_id);
   void OnFlush();
   void OnReset();
@@ -100,7 +100,7 @@
   void OnFilterRemoved();
 
   // Sets the texture to cleared.
-  void SetTextureCleared(const media::Picture& picture);
+  void SetTextureCleared(const Picture& picture);
 
   // Route ID to communicate with the host.
   const int32_t host_route_id_;
@@ -111,7 +111,7 @@
   gpu::GpuCommandBufferStub* const stub_;
 
   // The underlying VideoDecodeAccelerator.
-  std::unique_ptr<media::VideoDecodeAccelerator> video_decode_accelerator_;
+  std::unique_ptr<VideoDecodeAccelerator> video_decode_accelerator_;
 
   // Callback to return current GLContext, if available.
   GetGLContextCallback get_gl_context_cb_;
diff --git a/media/gpu/ipc/service/gpu_video_encode_accelerator.cc b/media/gpu/ipc/service/gpu_video_encode_accelerator.cc
index ff29fed..dc4e515 100644
--- a/media/gpu/ipc/service/gpu_video_encode_accelerator.cc
+++ b/media/gpu/ipc/service/gpu_video_encode_accelerator.cc
@@ -59,7 +59,7 @@
     gpu::GpuCommandBufferStub* stub)
     : host_route_id_(host_route_id),
       stub_(stub),
-      input_format_(media::PIXEL_FORMAT_UNKNOWN),
+      input_format_(PIXEL_FORMAT_UNKNOWN),
       output_buffer_size_(0),
       weak_this_factory_(this) {
   stub_->AddDestructionObserver(this);
@@ -73,11 +73,10 @@
   DCHECK(!encoder_);
 }
 
-bool GpuVideoEncodeAccelerator::Initialize(
-    media::VideoPixelFormat input_format,
-    const gfx::Size& input_visible_size,
-    media::VideoCodecProfile output_profile,
-    uint32_t initial_bitrate) {
+bool GpuVideoEncodeAccelerator::Initialize(VideoPixelFormat input_format,
+                                           const gfx::Size& input_visible_size,
+                                           VideoCodecProfile output_profile,
+                                           uint32_t initial_bitrate) {
   DVLOG(2) << "GpuVideoEncodeAccelerator::Initialize(): "
            << "input_format=" << input_format
            << ", input_visible_size=" << input_visible_size.ToString()
@@ -91,9 +90,9 @@
     return false;
   }
 
-  if (input_visible_size.width() > media::limits::kMaxDimension ||
-      input_visible_size.height() > media::limits::kMaxDimension ||
-      input_visible_size.GetArea() > media::limits::kMaxCanvas) {
+  if (input_visible_size.width() > limits::kMaxDimension ||
+      input_visible_size.height() > limits::kMaxDimension ||
+      input_visible_size.GetArea() > limits::kMaxCanvas) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::Initialize(): "
                 << "input_visible_size " << input_visible_size.ToString()
                 << " too large";
@@ -158,7 +157,7 @@
 }
 
 void GpuVideoEncodeAccelerator::NotifyError(
-    media::VideoEncodeAccelerator::Error error) {
+    VideoEncodeAccelerator::Error error) {
   Send(new AcceleratedVideoEncoderHostMsg_NotifyError(host_route_id_, error));
 }
 
@@ -174,22 +173,20 @@
 gpu::VideoEncodeAcceleratorSupportedProfiles
 GpuVideoEncodeAccelerator::GetSupportedProfiles(
     const gpu::GpuPreferences& gpu_preferences) {
-  media::VideoEncodeAccelerator::SupportedProfiles profiles;
+  VideoEncodeAccelerator::SupportedProfiles profiles;
   std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps =
       CreateVEAFps(gpu_preferences);
 
   for (size_t i = 0; i < create_vea_fps.size(); ++i) {
-    std::unique_ptr<media::VideoEncodeAccelerator> encoder =
-        (*create_vea_fps[i])();
+    std::unique_ptr<VideoEncodeAccelerator> encoder = (*create_vea_fps[i])();
     if (!encoder)
       continue;
-    media::VideoEncodeAccelerator::SupportedProfiles vea_profiles =
+    VideoEncodeAccelerator::SupportedProfiles vea_profiles =
         encoder->GetSupportedProfiles();
-    media::GpuVideoAcceleratorUtil::InsertUniqueEncodeProfiles(vea_profiles,
-                                                               &profiles);
+    GpuVideoAcceleratorUtil::InsertUniqueEncodeProfiles(vea_profiles,
+                                                        &profiles);
   }
-  return media::GpuVideoAcceleratorUtil::ConvertMediaToGpuEncodeProfiles(
-      profiles);
+  return GpuVideoAcceleratorUtil::ConvertMediaToGpuEncodeProfiles(profiles);
 }
 
 // static
@@ -216,9 +213,9 @@
 
 #if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
 // static
-std::unique_ptr<media::VideoEncodeAccelerator>
+std::unique_ptr<VideoEncodeAccelerator>
 GpuVideoEncodeAccelerator::CreateV4L2VEA() {
-  std::unique_ptr<media::VideoEncodeAccelerator> encoder;
+  std::unique_ptr<VideoEncodeAccelerator> encoder;
   scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kEncoder);
   if (device)
     encoder.reset(new V4L2VideoEncodeAccelerator(device));
@@ -228,27 +225,27 @@
 
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
 // static
-std::unique_ptr<media::VideoEncodeAccelerator>
+std::unique_ptr<VideoEncodeAccelerator>
 GpuVideoEncodeAccelerator::CreateVaapiVEA() {
-  return base::WrapUnique<media::VideoEncodeAccelerator>(
+  return base::WrapUnique<VideoEncodeAccelerator>(
       new VaapiVideoEncodeAccelerator());
 }
 #endif
 
 #if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
 // static
-std::unique_ptr<media::VideoEncodeAccelerator>
+std::unique_ptr<VideoEncodeAccelerator>
 GpuVideoEncodeAccelerator::CreateAndroidVEA() {
-  return base::WrapUnique<media::VideoEncodeAccelerator>(
+  return base::WrapUnique<VideoEncodeAccelerator>(
       new AndroidVideoEncodeAccelerator());
 }
 #endif
 
 #if defined(OS_MACOSX)
 // static
-std::unique_ptr<media::VideoEncodeAccelerator>
+std::unique_ptr<VideoEncodeAccelerator>
 GpuVideoEncodeAccelerator::CreateVTVEA() {
-  return base::WrapUnique<media::VideoEncodeAccelerator>(
+  return base::WrapUnique<VideoEncodeAccelerator>(
       new VTVideoEncodeAccelerator());
 }
 #endif
@@ -258,7 +255,7 @@
   DVLOG(3) << "GpuVideoEncodeAccelerator::OnEncode: frame_id = "
            << params.frame_id << ", buffer_size=" << params.buffer_size
            << ", force_keyframe=" << params.force_keyframe;
-  DCHECK_EQ(media::PIXEL_FORMAT_I420, input_format_);
+  DCHECK_EQ(PIXEL_FORMAT_I420, input_format_);
 
   // Wrap into a SharedMemory in the beginning, so that |params.buffer_handle|
   // is cleaned properly in case of an early return.
@@ -271,7 +268,7 @@
   if (params.frame_id < 0) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): invalid "
                 << "frame_id=" << params.frame_id;
-    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+    NotifyError(VideoEncodeAccelerator::kPlatformFailureError);
     return;
   }
 
@@ -285,31 +282,30 @@
   if (!map_offset.IsValid() || !map_size.IsValid()) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode():"
                 << " invalid (buffer_offset,buffer_size)";
-    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+    NotifyError(VideoEncodeAccelerator::kPlatformFailureError);
     return;
   }
 
   if (!shm->MapAt(map_offset.ValueOrDie(), map_size.ValueOrDie())) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): "
                 << "could not map frame_id=" << params.frame_id;
-    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+    NotifyError(VideoEncodeAccelerator::kPlatformFailureError);
     return;
   }
 
   uint8_t* shm_memory =
       reinterpret_cast<uint8_t*>(shm->memory()) + aligned_offset;
-  scoped_refptr<media::VideoFrame> frame =
-      media::VideoFrame::WrapExternalSharedMemory(
-          input_format_, input_coded_size_, gfx::Rect(input_visible_size_),
-          input_visible_size_, shm_memory, params.buffer_size,
-          params.buffer_handle, params.buffer_offset, params.timestamp);
+  scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalSharedMemory(
+      input_format_, input_coded_size_, gfx::Rect(input_visible_size_),
+      input_visible_size_, shm_memory, params.buffer_size, params.buffer_handle,
+      params.buffer_offset, params.timestamp);
   if (!frame) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): "
                 << "could not create a frame";
-    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+    NotifyError(VideoEncodeAccelerator::kPlatformFailureError);
     return;
   }
-  frame->AddDestructionObserver(media::BindToCurrentLoop(base::Bind(
+  frame->AddDestructionObserver(BindToCurrentLoop(base::Bind(
       &GpuVideoEncodeAccelerator::EncodeFrameFinished,
       weak_this_factory_.GetWeakPtr(), params.frame_id, base::Passed(&shm))));
   encoder_->Encode(frame, params.force_keyframe);
@@ -337,17 +333,17 @@
   if (buffer_id < 0) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
                 << "invalid buffer_id=" << buffer_id;
-    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+    NotifyError(VideoEncodeAccelerator::kPlatformFailureError);
     return;
   }
   if (buffer_size < output_buffer_size_) {
     DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
                 << "buffer too small for buffer_id=" << buffer_id;
-    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+    NotifyError(VideoEncodeAccelerator::kPlatformFailureError);
     return;
   }
   encoder_->UseOutputBitstreamBuffer(
-      media::BitstreamBuffer(buffer_id, buffer_handle, buffer_size));
+      BitstreamBuffer(buffer_id, buffer_handle, buffer_size));
 }
 
 void GpuVideoEncodeAccelerator::OnDestroy() {
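
OnEncode above must keep the SharedMemory mapping alive for as long as the wrapped VideoFrame can be read, so it hands ownership of |shm| to a destruction observer (bounced back to the correct thread via BindToCurrentLoop). The ownership pattern, reduced to standard C++ with illustrative stand-in types:

#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>

// Stand-in for a mapped shared-memory segment.
struct Mapping {
  std::vector<uint8_t> bytes;
};

// A frame that runs callbacks when destroyed, like
// VideoFrame::AddDestructionObserver.
class Frame {
 public:
  ~Frame() {
    for (auto& cb : observers_)
      cb();
  }
  void AddDestructionObserver(std::function<void()> cb) {
    observers_.push_back(std::move(cb));
  }

 private:
  std::vector<std::function<void()>> observers_;
};

std::unique_ptr<Frame> WrapMapping(std::unique_ptr<Mapping> shm) {
  auto frame = std::make_unique<Frame>();
  // The observer's only job is to own the mapping, so it lives exactly as
  // long as the frame. std::function needs a copyable callable, hence a
  // shared_ptr capture rather than a moved-in unique_ptr.
  std::shared_ptr<Mapping> owned(std::move(shm));
  frame->AddDestructionObserver([owned] { (void)owned; });
  return frame;
}
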
diff --git a/media/gpu/ipc/service/gpu_video_encode_accelerator.h b/media/gpu/ipc/service/gpu_video_encode_accelerator.h
index 6af2515..9b86c08d 100644
--- a/media/gpu/ipc/service/gpu_video_encode_accelerator.h
+++ b/media/gpu/ipc/service/gpu_video_encode_accelerator.h
@@ -37,7 +37,7 @@
 // IPC coming in from the renderer and passes it to the underlying VEA.
 class GpuVideoEncodeAccelerator
     : public IPC::Listener,
-      public media::VideoEncodeAccelerator::Client,
+      public VideoEncodeAccelerator::Client,
       public gpu::GpuCommandBufferStub::DestructionObserver {
  public:
   GpuVideoEncodeAccelerator(int32_t host_route_id,
@@ -46,15 +46,15 @@
 
   // Initialize this accelerator with the given parameters and send
   // |init_done_msg| when complete.
-  bool Initialize(media::VideoPixelFormat input_format,
+  bool Initialize(VideoPixelFormat input_format,
                   const gfx::Size& input_visible_size,
-                  media::VideoCodecProfile output_profile,
+                  VideoCodecProfile output_profile,
                   uint32_t initial_bitrate);
 
   // IPC::Listener implementation
   bool OnMessageReceived(const IPC::Message& message) override;
 
-  // media::VideoEncodeAccelerator::Client implementation.
+  // VideoEncodeAccelerator::Client implementation.
   void RequireBitstreamBuffers(unsigned int input_count,
                                const gfx::Size& input_coded_size,
                                size_t output_buffer_size) override;
@@ -62,7 +62,7 @@
                             size_t payload_size,
                             bool key_frame,
                             base::TimeDelta timestamp) override;
-  void NotifyError(media::VideoEncodeAccelerator::Error error) override;
+  void NotifyError(VideoEncodeAccelerator::Error error) override;
 
   // gpu::GpuCommandBufferStub::DestructionObserver implementation.
   void OnWillDestroyStub() override;
@@ -74,26 +74,26 @@
       const gpu::GpuPreferences& gpu_preferences);
 
  private:
-  typedef std::unique_ptr<media::VideoEncodeAccelerator> (*CreateVEAFp)();
+  typedef std::unique_ptr<VideoEncodeAccelerator> (*CreateVEAFp)();
 
   // Return a set of VEA Create function pointers applicable to the current
   // platform.
   static std::vector<CreateVEAFp> CreateVEAFps(
       const gpu::GpuPreferences& gpu_preferences);
 #if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
-  static std::unique_ptr<media::VideoEncodeAccelerator> CreateV4L2VEA();
+  static std::unique_ptr<VideoEncodeAccelerator> CreateV4L2VEA();
 #endif
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
-  static std::unique_ptr<media::VideoEncodeAccelerator> CreateVaapiVEA();
+  static std::unique_ptr<VideoEncodeAccelerator> CreateVaapiVEA();
 #endif
 #if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
-  static std::unique_ptr<media::VideoEncodeAccelerator> CreateAndroidVEA();
+  static std::unique_ptr<VideoEncodeAccelerator> CreateAndroidVEA();
 #endif
 #if defined(OS_MACOSX)
-  static std::unique_ptr<media::VideoEncodeAccelerator> CreateVTVEA();
+  static std::unique_ptr<VideoEncodeAccelerator> CreateVTVEA();
 #endif
 
-  // IPC handlers, proxying media::VideoEncodeAccelerator for the renderer
+  // IPC handlers, proxying VideoEncodeAccelerator for the renderer
   // process.
   void OnEncode(const AcceleratedVideoEncoderMsg_Encode_Params& params);
   void OnEncode2(const AcceleratedVideoEncoderMsg_Encode_Params2& params);
@@ -117,16 +117,16 @@
   gpu::GpuCommandBufferStub* const stub_;
 
   // Owned pointer to the underlying VideoEncodeAccelerator.
-  std::unique_ptr<media::VideoEncodeAccelerator> encoder_;
+  std::unique_ptr<VideoEncodeAccelerator> encoder_;
   base::Callback<bool(void)> make_context_current_;
 
   // Video encoding parameters.
-  media::VideoPixelFormat input_format_;
+  VideoPixelFormat input_format_;
   gfx::Size input_visible_size_;
   gfx::Size input_coded_size_;
   size_t output_buffer_size_;
 
-  // Weak pointer for media::VideoFrames that refer back to |this|.
+  // Weak pointer for VideoFrames that refer back to |this|.
   base::WeakPtrFactory<GpuVideoEncodeAccelerator> weak_this_factory_;
 
   DISALLOW_COPY_AND_ASSIGN(GpuVideoEncodeAccelerator);
diff --git a/media/gpu/ipc/service/media_channel.cc b/media/gpu/ipc/service/media_channel.cc
index f4bb4de..0d10231 100644
--- a/media/gpu/ipc/service/media_channel.cc
+++ b/media/gpu/ipc/service/media_channel.cc
@@ -37,14 +37,14 @@
 
   bool Send(IPC::Message* msg) { return channel_->Send(msg); }
 
-  void OnCreateVideoDecoder(const media::VideoDecodeAccelerator::Config& config,
+  void OnCreateVideoDecoder(const VideoDecodeAccelerator::Config& config,
                             int32_t decoder_route_id,
                             IPC::Message* reply_message) {
     channel_->OnCreateVideoDecoder(routing_id_, config, decoder_route_id,
                                    reply_message);
   }
 
-  void OnCreateVideoEncoder(const media::CreateVideoEncoderParams& params,
+  void OnCreateVideoEncoder(const CreateVideoEncoderParams& params,
                             IPC::Message* reply_message) {
     channel_->OnCreateVideoEncoder(routing_id_, params, reply_message);
   }
@@ -95,7 +95,7 @@
 
 void MediaChannel::OnCreateVideoDecoder(
     int32_t command_buffer_route_id,
-    const media::VideoDecodeAccelerator::Config& config,
+    const VideoDecodeAccelerator::Config& config,
     int32_t decoder_route_id,
     IPC::Message* reply_message) {
   TRACE_EVENT0("gpu", "MediaChannel::OnCreateVideoDecoder");
@@ -117,10 +117,9 @@
   // self-delete during destruction of this stub.
 }
 
-void MediaChannel::OnCreateVideoEncoder(
-    int32_t command_buffer_route_id,
-    const media::CreateVideoEncoderParams& params,
-    IPC::Message* reply_message) {
+void MediaChannel::OnCreateVideoEncoder(int32_t command_buffer_route_id,
+                                        const CreateVideoEncoderParams& params,
+                                        IPC::Message* reply_message) {
   TRACE_EVENT0("gpu", "MediaChannel::OnCreateVideoEncoder");
   gpu::GpuCommandBufferStub* stub =
       channel_->LookupCommandBuffer(command_buffer_route_id);
diff --git a/media/gpu/ipc/service/media_channel.h b/media/gpu/ipc/service/media_channel.h
index 3c6c1b0d..6e7c74c 100644
--- a/media/gpu/ipc/service/media_channel.h
+++ b/media/gpu/ipc/service/media_channel.h
@@ -42,11 +42,11 @@
   // Message handlers.
   void OnCreateJpegDecoder(int32_t route_id, IPC::Message* reply_msg);
   void OnCreateVideoDecoder(int32_t command_buffer_route_id,
-                            const media::VideoDecodeAccelerator::Config& config,
+                            const VideoDecodeAccelerator::Config& config,
                             int32_t route_id,
                             IPC::Message* reply_message);
   void OnCreateVideoEncoder(int32_t command_buffer_route_id,
-                            const media::CreateVideoEncoderParams& params,
+                            const CreateVideoEncoderParams& params,
                             IPC::Message* reply_message);
 
   gpu::GpuChannel* const channel_;
diff --git a/media/gpu/jpeg_decode_accelerator_unittest.cc b/media/gpu/jpeg_decode_accelerator_unittest.cc
index 5c68851..cc5e2c6 100644
--- a/media/gpu/jpeg_decode_accelerator_unittest.cc
+++ b/media/gpu/jpeg_decode_accelerator_unittest.cc
@@ -42,8 +42,6 @@
 #endif
 #endif
 
-using media::JpegDecodeAccelerator;
-
 namespace media {
 namespace {
 
@@ -72,7 +70,7 @@
   // The input content of |filename|.
   std::string data_str;
 
-  media::JpegParseResult parse_result;
+  JpegParseResult parse_result;
   gfx::Size visible_size;
   size_t output_size;
 };
@@ -250,38 +248,30 @@
 
   base::SharedMemoryHandle dup_handle;
   dup_handle = base::SharedMemory::DuplicateHandle(in_shm_->handle());
-  media::BitstreamBuffer bitstream_buffer(bitstream_buffer_id, dup_handle,
-                                          image_file->data_str.size());
-  scoped_refptr<media::VideoFrame> out_frame_ =
-      media::VideoFrame::WrapExternalSharedMemory(
-          media::PIXEL_FORMAT_I420,
-          image_file->visible_size,
-          gfx::Rect(image_file->visible_size),
-          image_file->visible_size,
-          static_cast<uint8_t*>(hw_out_shm_->memory()),
-          image_file->output_size,
-          hw_out_shm_->handle(),
-          0,
-          base::TimeDelta());
+  BitstreamBuffer bitstream_buffer(bitstream_buffer_id, dup_handle,
+                                   image_file->data_str.size());
+  scoped_refptr<VideoFrame> out_frame_ = VideoFrame::WrapExternalSharedMemory(
+      PIXEL_FORMAT_I420, image_file->visible_size,
+      gfx::Rect(image_file->visible_size), image_file->visible_size,
+      static_cast<uint8_t*>(hw_out_shm_->memory()), image_file->output_size,
+      hw_out_shm_->handle(), 0, base::TimeDelta());
   LOG_ASSERT(out_frame_.get());
   decoder_->Decode(bitstream_buffer, out_frame_);
 }
 
 bool JpegClient::GetSoftwareDecodeResult(int32_t bitstream_buffer_id) {
-  media::VideoPixelFormat format = media::PIXEL_FORMAT_I420;
+  VideoPixelFormat format = PIXEL_FORMAT_I420;
   TestImageFile* image_file = test_image_files_[bitstream_buffer_id];
 
   uint8_t* yplane = static_cast<uint8_t*>(sw_out_shm_->memory());
-  uint8_t* uplane =
-      yplane +
-      media::VideoFrame::PlaneSize(format, media::VideoFrame::kYPlane,
-                                   image_file->visible_size)
-          .GetArea();
-  uint8_t* vplane =
-      uplane +
-      media::VideoFrame::PlaneSize(format, media::VideoFrame::kUPlane,
-                                   image_file->visible_size)
-          .GetArea();
+  uint8_t* uplane = yplane +
+                    VideoFrame::PlaneSize(format, VideoFrame::kYPlane,
+                                          image_file->visible_size)
+                        .GetArea();
+  uint8_t* vplane = uplane +
+                    VideoFrame::PlaneSize(format, VideoFrame::kUPlane,
+                                          image_file->visible_size)
+                        .GetArea();
   int yplane_stride = image_file->visible_size.width();
   int uv_plane_stride = yplane_stride / 2;
 
@@ -370,8 +360,7 @@
   ASSERT_NO_FATAL_FAILURE(ReadTestJpegImage(test_640x360_jpeg_file_,
                                             image_data_640x360_black_.get()));
 
-  base::FilePath default_jpeg_file =
-      media::GetTestDataFilePath(kDefaultJpegFilename);
+  base::FilePath default_jpeg_file = GetTestDataFilePath(kDefaultJpegFilename);
   image_data_1280x720_default_.reset(new TestImageFile(kDefaultJpegFilename));
   ASSERT_NO_FATAL_FAILURE(
       ReadTestJpegImage(default_jpeg_file, image_data_1280x720_default_.get()));
@@ -379,15 +368,15 @@
   image_data_invalid_.reset(new TestImageFile("failure.jpg"));
   image_data_invalid_->data_str.resize(100, 0);
   image_data_invalid_->visible_size.SetSize(1280, 720);
-  image_data_invalid_->output_size = media::VideoFrame::AllocationSize(
-      media::PIXEL_FORMAT_I420, image_data_invalid_->visible_size);
+  image_data_invalid_->output_size = VideoFrame::AllocationSize(
+      PIXEL_FORMAT_I420, image_data_invalid_->visible_size);
 
   // |user_jpeg_filenames_| may include many files and use ';' as delimiter.
   std::vector<base::FilePath::StringType> filenames = base::SplitString(
       user_jpeg_filenames_, base::FilePath::StringType(1, ';'),
       base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   for (const auto& filename : filenames) {
-    base::FilePath input_file = media::GetTestDataFilePath(filename);
+    base::FilePath input_file = GetTestDataFilePath(filename);
     TestImageFile* image_data = new TestImageFile(filename);
     ASSERT_NO_FATAL_FAILURE(ReadTestJpegImage(input_file, image_data));
     image_data_user_.push_back(image_data);
@@ -425,14 +414,14 @@
     TestImageFile* image_data) {
   ASSERT_TRUE(base::ReadFileToString(input_file, &image_data->data_str));
 
-  ASSERT_TRUE(media::ParseJpegPicture(
+  ASSERT_TRUE(ParseJpegPicture(
       reinterpret_cast<const uint8_t*>(image_data->data_str.data()),
       image_data->data_str.size(), &image_data->parse_result));
   image_data->visible_size.SetSize(
       image_data->parse_result.frame_header.visible_width,
       image_data->parse_result.frame_header.visible_height);
-  image_data->output_size = media::VideoFrame::AllocationSize(
-      media::PIXEL_FORMAT_I420, image_data->visible_size);
+  image_data->output_size =
+      VideoFrame::AllocationSize(PIXEL_FORMAT_I420, image_data->visible_size);
 }
 
 class JpegDecodeAcceleratorTest : public ::testing::Test {
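
Several spots above size I420 buffers with VideoFrame::AllocationSize(PIXEL_FORMAT_I420, size), and GetSoftwareDecodeResult derives the U/V plane pointers from VideoFrame::PlaneSize. For 4:2:0 planar data that arithmetic is a full-resolution Y plane followed by two quarter-resolution chroma planes; a small self-contained sketch of the computation (assuming even dimensions and no row alignment, which the real VideoFrame helpers do handle):

#include <cstddef>
#include <cstdio>

// Plane layout for an I420 (YUV 4:2:0 planar) frame of even width/height:
// Y at full resolution, then U and V at half resolution in each dimension.
struct I420Layout {
  size_t y_size, uv_size;     // bytes per plane
  size_t u_offset, v_offset;  // byte offsets from the buffer start
  size_t total;               // AllocationSize equivalent
};

I420Layout LayoutI420(int width, int height) {
  const size_t y = static_cast<size_t>(width) * height;
  const size_t uv = static_cast<size_t>(width / 2) * (height / 2);
  return {y, uv, y, y + uv, y + 2 * uv};
}

int main() {
  const I420Layout l = LayoutI420(1280, 720);
  // 921600 + 2 * 230400 = 1382400 bytes, i.e. width * height * 3 / 2.
  std::printf("total=%zu u_offset=%zu v_offset=%zu\n",
              l.total, l.u_offset, l.v_offset);
}
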
diff --git a/media/gpu/shared_memory_region.cc b/media/gpu/shared_memory_region.cc
index 13df2041a..38d2038 100644
--- a/media/gpu/shared_memory_region.cc
+++ b/media/gpu/shared_memory_region.cc
@@ -18,9 +18,8 @@
   DCHECK_GE(offset_, 0) << "Invalid offset: " << offset_;
 }
 
-SharedMemoryRegion::SharedMemoryRegion(
-    const media::BitstreamBuffer& bitstream_buffer,
-    bool read_only)
+SharedMemoryRegion::SharedMemoryRegion(const BitstreamBuffer& bitstream_buffer,
+                                       bool read_only)
     : SharedMemoryRegion(bitstream_buffer.handle(),
                          bitstream_buffer.offset(),
                          bitstream_buffer.size(),
diff --git a/media/gpu/shared_memory_region.h b/media/gpu/shared_memory_region.h
index 2ca11d7..09984a7 100644
--- a/media/gpu/shared_memory_region.h
+++ b/media/gpu/shared_memory_region.h
@@ -29,8 +29,7 @@
                      bool read_only);
 
   // Creates a SharedMemoryRegion from the given |bitstream_buffer|.
-  SharedMemoryRegion(const media::BitstreamBuffer& bitstream_buffer,
-                     bool read_only);
+  SharedMemoryRegion(const BitstreamBuffer& bitstream_buffer, bool read_only);
 
   // Maps the shared memory into the caller's address space.
   // Return true on success, false otherwise.
diff --git a/media/gpu/v4l2_device.cc b/media/gpu/v4l2_device.cc
index a291104..0550438 100644
--- a/media/gpu/v4l2_device.cc
+++ b/media/gpu/v4l2_device.cc
@@ -38,40 +38,38 @@
 }
 
 // static
-media::VideoPixelFormat V4L2Device::V4L2PixFmtToVideoPixelFormat(
-    uint32_t pix_fmt) {
+VideoPixelFormat V4L2Device::V4L2PixFmtToVideoPixelFormat(uint32_t pix_fmt) {
   switch (pix_fmt) {
     case V4L2_PIX_FMT_NV12:
     case V4L2_PIX_FMT_NV12M:
-      return media::PIXEL_FORMAT_NV12;
+      return PIXEL_FORMAT_NV12;
 
     case V4L2_PIX_FMT_MT21:
-      return media::PIXEL_FORMAT_MT21;
+      return PIXEL_FORMAT_MT21;
 
     case V4L2_PIX_FMT_YUV420:
     case V4L2_PIX_FMT_YUV420M:
-      return media::PIXEL_FORMAT_I420;
+      return PIXEL_FORMAT_I420;
 
     case V4L2_PIX_FMT_RGB32:
-      return media::PIXEL_FORMAT_ARGB;
+      return PIXEL_FORMAT_ARGB;
 
     default:
       LOG(FATAL) << "Add more cases as needed";
-      return media::PIXEL_FORMAT_UNKNOWN;
+      return PIXEL_FORMAT_UNKNOWN;
   }
 }
 
 // static
-uint32_t V4L2Device::VideoPixelFormatToV4L2PixFmt(
-    media::VideoPixelFormat format) {
+uint32_t V4L2Device::VideoPixelFormatToV4L2PixFmt(VideoPixelFormat format) {
   switch (format) {
-    case media::PIXEL_FORMAT_NV12:
+    case PIXEL_FORMAT_NV12:
       return V4L2_PIX_FMT_NV12M;
 
-    case media::PIXEL_FORMAT_MT21:
+    case PIXEL_FORMAT_MT21:
       return V4L2_PIX_FMT_MT21;
 
-    case media::PIXEL_FORMAT_I420:
+    case PIXEL_FORMAT_I420:
       return V4L2_PIX_FMT_YUV420M;
 
     default:
@@ -81,22 +79,19 @@
 }
 
 // static
-uint32_t V4L2Device::VideoCodecProfileToV4L2PixFmt(
-    media::VideoCodecProfile profile,
-    bool slice_based) {
-  if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) {
+uint32_t V4L2Device::VideoCodecProfileToV4L2PixFmt(VideoCodecProfile profile,
+                                                   bool slice_based) {
+  if (profile >= H264PROFILE_MIN && profile <= H264PROFILE_MAX) {
     if (slice_based)
       return V4L2_PIX_FMT_H264_SLICE;
     else
       return V4L2_PIX_FMT_H264;
-  } else if (profile >= media::VP8PROFILE_MIN &&
-             profile <= media::VP8PROFILE_MAX) {
+  } else if (profile >= VP8PROFILE_MIN && profile <= VP8PROFILE_MAX) {
     if (slice_based)
       return V4L2_PIX_FMT_VP8_FRAME;
     else
       return V4L2_PIX_FMT_VP8;
-  } else if (profile >= media::VP9PROFILE_MIN &&
-             profile <= media::VP9PROFILE_MAX) {
+  } else if (profile >= VP9PROFILE_MIN && profile <= VP9PROFILE_MAX) {
     return V4L2_PIX_FMT_VP9;
   } else {
     LOG(FATAL) << "Add more cases as needed";
@@ -128,7 +123,7 @@
 gfx::Size V4L2Device::CodedSizeFromV4L2Format(struct v4l2_format format) {
   gfx::Size coded_size;
   gfx::Size visible_size;
-  media::VideoPixelFormat frame_format = media::PIXEL_FORMAT_UNKNOWN;
+  VideoPixelFormat frame_format = PIXEL_FORMAT_UNKNOWN;
   size_t bytesperline = 0;
   // Total bytes in the frame.
   size_t sizeimage = 0;
@@ -166,12 +161,12 @@
   // We need bits per pixel for one component only to calculate
   // coded_width from bytesperline.
   int plane_horiz_bits_per_pixel =
-      media::VideoFrame::PlaneHorizontalBitsPerPixel(frame_format, 0);
+      VideoFrame::PlaneHorizontalBitsPerPixel(frame_format, 0);
 
   // Adding up bpp for each component will give us total bpp for all components.
   int total_bpp = 0;
-  for (size_t i = 0; i < media::VideoFrame::NumPlanes(frame_format); ++i)
-    total_bpp += media::VideoFrame::PlaneBitsPerPixel(frame_format, i);
+  for (size_t i = 0; i < VideoFrame::NumPlanes(frame_format); ++i)
+    total_bpp += VideoFrame::PlaneBitsPerPixel(frame_format, i);
 
   if (sizeimage == 0 || bytesperline == 0 || plane_horiz_bits_per_pixel == 0 ||
       total_bpp == 0 || (bytesperline * 8) % plane_horiz_bits_per_pixel != 0) {
@@ -192,15 +187,14 @@
   // some drivers (Exynos) like to have some additional alignment that is not a
   // multiple of bytesperline. The best thing we can do is to compensate by
   // aligning to next full row.
-  if (sizeimage > media::VideoFrame::AllocationSize(frame_format, coded_size))
+  if (sizeimage > VideoFrame::AllocationSize(frame_format, coded_size))
     coded_size.SetSize(coded_width, coded_height + 1);
   DVLOG(3) << "coded_size=" << coded_size.ToString();
 
   // Sanity checks. Calculated coded size has to contain given visible size
   // and fulfill buffer byte size requirements.
   DCHECK(gfx::Rect(coded_size).Contains(gfx::Rect(visible_size)));
-  DCHECK_LE(sizeimage,
-            media::VideoFrame::AllocationSize(frame_format, coded_size));
+  DCHECK_LE(sizeimage, VideoFrame::AllocationSize(frame_format, coded_size));
 
   return coded_size;
 }
@@ -253,12 +247,12 @@
   }
 }
 
-media::VideoDecodeAccelerator::SupportedProfiles
+VideoDecodeAccelerator::SupportedProfiles
 V4L2Device::GetSupportedDecodeProfiles(const size_t num_formats,
                                        const uint32_t pixelformats[]) {
   DCHECK_EQ(type_, kDecoder);
-  media::VideoDecodeAccelerator::SupportedProfiles profiles;
-  media::VideoDecodeAccelerator::SupportedProfile profile;
+  VideoDecodeAccelerator::SupportedProfiles profiles;
+  VideoDecodeAccelerator::SupportedProfile profile;
   v4l2_fmtdesc fmtdesc;
   memset(&fmtdesc, 0, sizeof(fmtdesc));
   fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
@@ -271,17 +265,17 @@
     switch (fmtdesc.pixelformat) {
       case V4L2_PIX_FMT_H264:
       case V4L2_PIX_FMT_H264_SLICE:
-        min_profile = media::H264PROFILE_MIN;
-        max_profile = media::H264PROFILE_MAX;
+        min_profile = H264PROFILE_MIN;
+        max_profile = H264PROFILE_MAX;
         break;
       case V4L2_PIX_FMT_VP8:
       case V4L2_PIX_FMT_VP8_FRAME:
-        min_profile = media::VP8PROFILE_MIN;
-        max_profile = media::VP8PROFILE_MAX;
+        min_profile = VP8PROFILE_MIN;
+        max_profile = VP8PROFILE_MAX;
         break;
       case V4L2_PIX_FMT_VP9:
-        min_profile = media::VP9PROFILE_MIN;
-        max_profile = media::VP9PROFILE_MAX;
+        min_profile = VP9PROFILE_MIN;
+        max_profile = VP9PROFILE_MAX;
         break;
       default:
         NOTREACHED() << "Unhandled pixelformat " << std::hex
@@ -292,7 +286,7 @@
                            &profile.max_resolution);
     for (int media_profile = min_profile; media_profile <= max_profile;
          ++media_profile) {
-      profile.profile = static_cast<media::VideoCodecProfile>(media_profile);
+      profile.profile = static_cast<VideoCodecProfile>(media_profile);
       profiles.push_back(profile);
     }
   }
@@ -300,7 +294,7 @@
 }
 
 bool V4L2Device::SupportsDecodeProfileForV4L2PixelFormats(
-    media::VideoCodecProfile profile,
+    VideoCodecProfile profile,
     const size_t num_formats,
     const uint32_t pixelformats[]) {
   // Get all supported profiles by this device, taking into account only fourccs
@@ -311,7 +305,7 @@
   // Try to find requested profile among the returned supported_profiles.
   const auto iter = std::find_if(
       supported_profiles.begin(), supported_profiles.end(),
-      [profile](const media::VideoDecodeAccelerator::SupportedProfile& p) {
+      [profile](const VideoDecodeAccelerator::SupportedProfile& p) {
         return profile == p.profile;
       });
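
CodedSizeFromV4L2Format above reverses the driver-reported buffer geometry into a coded size: bytesperline fixes the coded width via the first plane's horizontal bits per pixel, and sizeimage fixes the coded height via the total bits per pixel across all planes. Reduced to plain arithmetic under NV12-like assumptions (first plane 8 bpp horizontally, 12 bpp total; the real code queries both through VideoFrame and then sanity-checks the result against the visible size):

#include <cstddef>

struct Size {
  int width;
  int height;
};

// Derive a coded size from V4L2-reported buffer geometry, in the spirit of
// CodedSizeFromV4L2Format above. Hard-codes NV12-like constants for the
// sketch; Chromium derives them per-format from VideoFrame.
Size CodedSizeFromV4L2(size_t bytesperline, size_t sizeimage) {
  const int plane_horiz_bpp = 8;  // bits per pixel, first plane of NV12
  const int total_bpp = 12;       // bits per pixel summed over all planes
  const int coded_width = static_cast<int>(bytesperline * 8 / plane_horiz_bpp);
  const int coded_height =
      static_cast<int>(sizeimage * 8 / coded_width / total_bpp);
  return {coded_width, coded_height};
}

For a 1280x720 NV12 buffer (bytesperline = 1280, sizeimage = 1382400) this yields 1280x720, and the real code bumps the height by one row when sizeimage exceeds the computed allocation, to absorb driver-specific padding.
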
 
diff --git a/media/gpu/v4l2_device.h b/media/gpu/v4l2_device.h
index 1c765e95..edf4686 100644
--- a/media/gpu/v4l2_device.h
+++ b/media/gpu/v4l2_device.h
@@ -35,11 +35,10 @@
     : public base::RefCountedThreadSafe<V4L2Device> {
  public:
   // Utility format conversion functions
-  static media::VideoPixelFormat V4L2PixFmtToVideoPixelFormat(uint32_t format);
-  static uint32_t VideoPixelFormatToV4L2PixFmt(media::VideoPixelFormat format);
-  static uint32_t VideoCodecProfileToV4L2PixFmt(
-      media::VideoCodecProfile profile,
-      bool slice_based);
+  static VideoPixelFormat V4L2PixFmtToVideoPixelFormat(uint32_t format);
+  static uint32_t VideoPixelFormatToV4L2PixFmt(VideoPixelFormat format);
+  static uint32_t VideoCodecProfileToV4L2PixFmt(VideoCodecProfile profile,
+                                                bool slice_based);
   static uint32_t V4L2PixFmtToDrmFormat(uint32_t format);
   // Convert format requirements requested by a V4L2 device to gfx::Size.
   static gfx::Size CodedSizeFromV4L2Format(struct v4l2_format format);
@@ -137,16 +136,15 @@
 
   // Return supported profiles for decoder, including only profiles for given
   // fourcc |pixelformats|.
-  media::VideoDecodeAccelerator::SupportedProfiles GetSupportedDecodeProfiles(
+  VideoDecodeAccelerator::SupportedProfiles GetSupportedDecodeProfiles(
       const size_t num_formats,
       const uint32_t pixelformats[]);
 
   // Return true if the device supports |profile|, taking into account only
   // fourccs from the given array of |pixelformats| of size |num_formats|.
-  bool SupportsDecodeProfileForV4L2PixelFormats(
-      media::VideoCodecProfile profile,
-      const size_t num_formats,
-      const uint32_t pixelformats[]);
+  bool SupportsDecodeProfileForV4L2PixelFormats(VideoCodecProfile profile,
+                                                const size_t num_formats,
+                                                const uint32_t pixelformats[]);
 
  protected:
   friend class base::RefCountedThreadSafe<V4L2Device>;
diff --git a/media/gpu/v4l2_image_processor.cc b/media/gpu/v4l2_image_processor.cc
index 22c8696..5d503f2 100644
--- a/media/gpu/v4l2_image_processor.cc
+++ b/media/gpu/v4l2_image_processor.cc
@@ -53,8 +53,8 @@
 V4L2ImageProcessor::JobRecord::~JobRecord() {}
 
 V4L2ImageProcessor::V4L2ImageProcessor(const scoped_refptr<V4L2Device>& device)
-    : input_format_(media::PIXEL_FORMAT_UNKNOWN),
-      output_format_(media::PIXEL_FORMAT_UNKNOWN),
+    : input_format_(PIXEL_FORMAT_UNKNOWN),
+      output_format_(PIXEL_FORMAT_UNKNOWN),
       input_memory_type_(V4L2_MEMORY_USERPTR),
       input_format_fourcc_(0),
       output_format_fourcc_(0),
@@ -96,8 +96,8 @@
   error_cb_.Run();
 }
 
-bool V4L2ImageProcessor::Initialize(media::VideoPixelFormat input_format,
-                                    media::VideoPixelFormat output_format,
+bool V4L2ImageProcessor::Initialize(VideoPixelFormat input_format,
+                                    VideoPixelFormat output_format,
                                     v4l2_memory input_memory_type,
                                     gfx::Size input_visible_size,
                                     gfx::Size input_allocated_size,
@@ -155,9 +155,8 @@
                  base::Unretained(this)));
 
   DVLOG(1) << "V4L2ImageProcessor initialized for "
-           << " input_format:" << media::VideoPixelFormatToString(input_format)
-           << ", output_format:"
-           << media::VideoPixelFormatToString(output_format)
+           << " input_format:" << VideoPixelFormatToString(input_format)
+           << ", output_format:" << VideoPixelFormatToString(output_format)
            << ", input_visible_size: " << input_visible_size.ToString()
            << ", input_allocated_size: " << input_allocated_size_.ToString()
            << ", input_planes_count: " << input_planes_count_
@@ -218,7 +217,7 @@
   return true;
 }
 
-void V4L2ImageProcessor::Process(const scoped_refptr<media::VideoFrame>& frame,
+void V4L2ImageProcessor::Process(const scoped_refptr<VideoFrame>& frame,
                                  int output_buffer_index,
                                  const FrameReadyCB& cb) {
   DVLOG(3) << __func__ << ": ts=" << frame->timestamp().InMilliseconds();
@@ -599,8 +598,8 @@
   qbuf.length = input_planes_count_;
   for (size_t i = 0; i < input_planes_count_; ++i) {
     qbuf.m.planes[i].bytesused =
-        media::VideoFrame::PlaneSize(input_record.frame->format(), i,
-                                     input_allocated_size_)
+        VideoFrame::PlaneSize(input_record.frame->format(), i,
+                              input_allocated_size_)
             .GetArea();
     qbuf.m.planes[i].length = qbuf.m.planes[i].bytesused;
     if (input_memory_type_ == V4L2_MEMORY_USERPTR) {
diff --git a/media/gpu/v4l2_image_processor.h b/media/gpu/v4l2_image_processor.h
index e448a4d..3e456b9 100644
--- a/media/gpu/v4l2_image_processor.h
+++ b/media/gpu/v4l2_image_processor.h
@@ -38,8 +38,8 @@
   // input buffers and output buffers will be |num_buffers|. Provided |error_cb|
   // will be called if an error occurs. Return true if the requested
   // configuration is supported.
-  bool Initialize(media::VideoPixelFormat input_format,
-                  media::VideoPixelFormat output_format,
+  bool Initialize(VideoPixelFormat input_format,
+                  VideoPixelFormat output_format,
                   v4l2_memory input_memory_type,
                   gfx::Size input_visible_size,
                   gfx::Size input_allocated_size,
@@ -85,7 +85,7 @@
   // stored in |output_buffer_index| output buffer and notified via |cb|. The
   // processor will drop all its references to |frame| after it finishes
   // accessing it.
-  void Process(const scoped_refptr<media::VideoFrame>& frame,
+  void Process(const scoped_refptr<VideoFrame>& frame,
                int output_buffer_index,
                const FrameReadyCB& cb);
 
@@ -99,7 +99,7 @@
   struct InputRecord {
     InputRecord();
     ~InputRecord();
-    scoped_refptr<media::VideoFrame> frame;
+    scoped_refptr<VideoFrame> frame;
     bool at_device;
   };
 
@@ -117,7 +117,7 @@
   struct JobRecord {
     JobRecord();
     ~JobRecord();
-    scoped_refptr<media::VideoFrame> frame;
+    scoped_refptr<VideoFrame> frame;
     int output_buffer_index;
     FrameReadyCB ready_cb;
   };
@@ -158,8 +158,8 @@
   gfx::Size output_visible_size_;
   gfx::Size output_allocated_size_;
 
-  media::VideoPixelFormat input_format_;
-  media::VideoPixelFormat output_format_;
+  VideoPixelFormat input_format_;
+  VideoPixelFormat output_format_;
   v4l2_memory input_memory_type_;
   uint32_t input_format_fourcc_;
   uint32_t output_format_fourcc_;
diff --git a/media/gpu/v4l2_jpeg_decode_accelerator.cc b/media/gpu/v4l2_jpeg_decode_accelerator.cc
index e8f4cc6..b3109191 100644
--- a/media/gpu/v4l2_jpeg_decode_accelerator.cc
+++ b/media/gpu/v4l2_jpeg_decode_accelerator.cc
@@ -113,8 +113,8 @@
 V4L2JpegDecodeAccelerator::BufferRecord::~BufferRecord() {}
 
 V4L2JpegDecodeAccelerator::JobRecord::JobRecord(
-    const media::BitstreamBuffer& bitstream_buffer,
-    scoped_refptr<media::VideoFrame> video_frame)
+    const BitstreamBuffer& bitstream_buffer,
+    scoped_refptr<VideoFrame> video_frame)
     : bitstream_buffer_id(bitstream_buffer.id()),
       shm(bitstream_buffer, true),
       out_frame(video_frame) {}
@@ -226,8 +226,8 @@
 }
 
 void V4L2JpegDecodeAccelerator::Decode(
-    const media::BitstreamBuffer& bitstream_buffer,
-    const scoped_refptr<media::VideoFrame>& video_frame) {
+    const BitstreamBuffer& bitstream_buffer,
+    const scoped_refptr<VideoFrame>& video_frame) {
   DVLOG(1) << "Decode(): input_id=" << bitstream_buffer.id()
            << ", size=" << bitstream_buffer.size();
   DCHECK(io_task_runner_->BelongsToCurrentThread());
@@ -240,7 +240,7 @@
     return;
   }
 
-  if (video_frame->format() != media::PIXEL_FORMAT_I420) {
+  if (video_frame->format() != PIXEL_FORMAT_I420) {
     PostNotifyError(bitstream_buffer.id(), UNSUPPORTED_JPEG);
     return;
   }
@@ -388,8 +388,8 @@
   DCHECK(!running_jobs_.empty());
   linked_ptr<JobRecord> job_record = running_jobs_.front();
 
-  size_t frame_size = media::VideoFrame::AllocationSize(
-      media::PIXEL_FORMAT_I420, job_record->out_frame->coded_size());
+  size_t frame_size = VideoFrame::AllocationSize(
+      PIXEL_FORMAT_I420, job_record->out_frame->coded_size());
   struct v4l2_format format;
   memset(&format, 0, sizeof(format));
   format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -413,7 +413,7 @@
   DCHECK(output_buffer_map_.empty());
   output_buffer_map_.resize(reqbufs.count);
 
-  media::VideoPixelFormat output_format =
+  VideoPixelFormat output_format =
       V4L2Device::V4L2PixFmtToVideoPixelFormat(output_buffer_pixelformat_);
 
   for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
@@ -427,7 +427,7 @@
     IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
 
     DCHECK_GE(buffer.length,
-              media::VideoFrame::AllocationSize(
+              VideoFrame::AllocationSize(
                   output_format,
                   gfx::Size(format.fmt.pix.width, format.fmt.pix.height)));
 
@@ -616,16 +616,16 @@
 static bool CopyOutputImage(const uint32_t src_pixelformat,
                             const void* src_addr,
                             const gfx::Size& src_coded_size,
-                            const scoped_refptr<media::VideoFrame>& dst_frame) {
-  media::VideoPixelFormat format =
+                            const scoped_refptr<VideoFrame>& dst_frame) {
+  VideoPixelFormat format =
       V4L2Device::V4L2PixFmtToVideoPixelFormat(src_pixelformat);
-  size_t src_size = media::VideoFrame::AllocationSize(format, src_coded_size);
-  uint8_t* dst_y = dst_frame->data(media::VideoFrame::kYPlane);
-  uint8_t* dst_u = dst_frame->data(media::VideoFrame::kUPlane);
-  uint8_t* dst_v = dst_frame->data(media::VideoFrame::kVPlane);
-  size_t dst_y_stride = dst_frame->stride(media::VideoFrame::kYPlane);
-  size_t dst_u_stride = dst_frame->stride(media::VideoFrame::kUPlane);
-  size_t dst_v_stride = dst_frame->stride(media::VideoFrame::kVPlane);
+  size_t src_size = VideoFrame::AllocationSize(format, src_coded_size);
+  uint8_t* dst_y = dst_frame->data(VideoFrame::kYPlane);
+  uint8_t* dst_u = dst_frame->data(VideoFrame::kUPlane);
+  uint8_t* dst_v = dst_frame->data(VideoFrame::kVPlane);
+  size_t dst_y_stride = dst_frame->stride(VideoFrame::kYPlane);
+  size_t dst_u_stride = dst_frame->stride(VideoFrame::kUPlane);
+  size_t dst_v_stride = dst_frame->stride(VideoFrame::kVPlane);
 
   // If the source format is I420, ConvertToI420 will simply copy the frame.
   if (libyuv::ConvertToI420(static_cast<uint8_t*>(const_cast<void*>(src_addr)),
@@ -746,7 +746,7 @@
   uint8_t marker1, marker2;
   READ_U8_OR_RETURN_FALSE(reader, &marker1);
   READ_U8_OR_RETURN_FALSE(reader, &marker2);
-  if (marker1 != media::JPEG_MARKER_PREFIX || marker2 != media::JPEG_SOI) {
+  if (marker1 != JPEG_MARKER_PREFIX || marker2 != JPEG_SOI) {
     DLOG(ERROR) << __func__ << ": The input is not a Jpeg";
     return false;
   }
@@ -758,13 +758,13 @@
   while (!has_marker_sos && !has_marker_dht) {
     const char* start_addr = reader.ptr();
     READ_U8_OR_RETURN_FALSE(reader, &marker1);
-    if (marker1 != media::JPEG_MARKER_PREFIX) {
+    if (marker1 != JPEG_MARKER_PREFIX) {
       DLOG(ERROR) << __func__ << ": marker1 != 0xFF";
       return false;
     }
     do {
       READ_U8_OR_RETURN_FALSE(reader, &marker2);
-    } while (marker2 == media::JPEG_MARKER_PREFIX);  // skip fill bytes
+    } while (marker2 == JPEG_MARKER_PREFIX);  // skip fill bytes
 
     uint16_t size;
     READ_U16_OR_RETURN_FALSE(reader, &size);
@@ -777,11 +777,11 @@
     size -= sizeof(size);
 
     switch (marker2) {
-      case media::JPEG_DHT: {
+      case JPEG_DHT: {
         has_marker_dht = true;
         break;
       }
-      case media::JPEG_SOS: {
+      case JPEG_SOS: {
         if (!has_marker_dht) {
           memcpy(static_cast<uint8_t*>(output_ptr) + current_offset,
                  kDefaultDhtSeg, sizeof(kDefaultDhtSeg));
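For context, the hunk above belongs to the routine that walks JPEG markers and splices the default Huffman tables (kDefaultDhtSeg) in front of the scan when the stream carries no DHT segment. A simplified sketch of just the detection walk, using base::BigEndianReader as the file does — the helper is illustrative, and the real routine also copies each segment into the output buffer as it goes:

  #include <stdint.h>
  #include "base/big_endian.h"

  // Marker constants per ITU-T T.81 (same values the media JPEG parser uses).
  const uint8_t kMarkerPrefix = 0xFF;  // JPEG_MARKER_PREFIX
  const uint8_t kMarkerDht = 0xC4;     // JPEG_DHT
  const uint8_t kMarkerSos = 0xDA;     // JPEG_SOS

  // Returns true if the scan starts before any DHT segment, i.e. the default
  // Huffman tables must be injected for the hardware decoder.
  bool NeedsDefaultDht(base::BigEndianReader* reader) {
    uint8_t marker1, marker2;
    while (reader->ReadU8(&marker1) && marker1 == kMarkerPrefix) {
      do {
        if (!reader->ReadU8(&marker2))
          return false;
      } while (marker2 == kMarkerPrefix);  // skip fill bytes
      if (marker2 == kMarkerDht)
        return false;  // stream ships its own Huffman tables
      if (marker2 == kMarkerSos)
        return true;   // reached scan data with no DHT seen
      uint16_t size;   // segment length, including these two bytes
      if (!reader->ReadU16(&size) || size < sizeof(size) ||
          !reader->Skip(size - sizeof(size)))
        return false;
    }
    return false;
  }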
diff --git a/media/gpu/v4l2_jpeg_decode_accelerator.h b/media/gpu/v4l2_jpeg_decode_accelerator.h
index 97f067c1..b282494 100644
--- a/media/gpu/v4l2_jpeg_decode_accelerator.h
+++ b/media/gpu/v4l2_jpeg_decode_accelerator.h
@@ -28,17 +28,17 @@
 namespace media {
 
 class MEDIA_GPU_EXPORT V4L2JpegDecodeAccelerator
-    : public media::JpegDecodeAccelerator {
+    : public JpegDecodeAccelerator {
  public:
   V4L2JpegDecodeAccelerator(
       const scoped_refptr<V4L2Device>& device,
       const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
   ~V4L2JpegDecodeAccelerator() override;
 
-  // media::JpegDecodeAccelerator implementation.
+  // JpegDecodeAccelerator implementation.
   bool Initialize(Client* client) override;
-  void Decode(const media::BitstreamBuffer& bitstream_buffer,
-              const scoped_refptr<media::VideoFrame>& video_frame) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer,
+              const scoped_refptr<VideoFrame>& video_frame) override;
   bool IsSupported() override;
 
  private:
@@ -60,8 +60,8 @@
   // the time of submission we may not have one available (and don't need one
   // to submit input to the device).
   struct JobRecord {
-    JobRecord(const media::BitstreamBuffer& bitstream_buffer,
-              scoped_refptr<media::VideoFrame> video_frame);
+    JobRecord(const BitstreamBuffer& bitstream_buffer,
+              scoped_refptr<VideoFrame> video_frame);
     ~JobRecord();
 
     // Input image buffer ID.
@@ -69,7 +69,7 @@
     // Memory mapped from |bitstream_buffer|.
     SharedMemoryRegion shm;
     // Output frame buffer.
-    scoped_refptr<media::VideoFrame> out_frame;
+    scoped_refptr<VideoFrame> out_frame;
   };
 
   void EnqueueInput();
diff --git a/media/gpu/v4l2_slice_video_decode_accelerator.cc b/media/gpu/v4l2_slice_video_decode_accelerator.cc
index 29cf9e4..d82b708 100644
--- a/media/gpu/v4l2_slice_video_decode_accelerator.cc
+++ b/media/gpu/v4l2_slice_video_decode_accelerator.cc
@@ -226,15 +226,15 @@
 }
 
 struct V4L2SliceVideoDecodeAccelerator::PictureRecord {
-  PictureRecord(bool cleared, const media::Picture& picture);
+  PictureRecord(bool cleared, const Picture& picture);
   ~PictureRecord();
   bool cleared;  // Whether the texture is cleared and safe to render from.
-  media::Picture picture;  // The decoded picture.
+  Picture picture;  // The decoded picture.
 };
 
 V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord(
     bool cleared,
-    const media::Picture& picture)
+    const Picture& picture)
     : cleared(cleared), picture(picture) {}
 
 V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
@@ -248,16 +248,16 @@
   // H264Decoder::H264Accelerator implementation.
   scoped_refptr<H264Picture> CreateH264Picture() override;
 
-  bool SubmitFrameMetadata(const media::H264SPS* sps,
-                           const media::H264PPS* pps,
+  bool SubmitFrameMetadata(const H264SPS* sps,
+                           const H264PPS* pps,
                            const H264DPB& dpb,
                            const H264Picture::Vector& ref_pic_listp0,
                            const H264Picture::Vector& ref_pic_listb0,
                            const H264Picture::Vector& ref_pic_listb1,
                            const scoped_refptr<H264Picture>& pic) override;
 
-  bool SubmitSlice(const media::H264PPS* pps,
-                   const media::H264SliceHeader* slice_hdr,
+  bool SubmitSlice(const H264PPS* pps,
+                   const H264SliceHeader* slice_hdr,
                    const H264Picture::Vector& ref_pic_list0,
                    const H264Picture::Vector& ref_pic_list1,
                    const scoped_refptr<H264Picture>& pic,
@@ -303,7 +303,7 @@
   scoped_refptr<VP8Picture> CreateVP8Picture() override;
 
   bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
-                    const media::Vp8FrameHeader* frame_hdr,
+                    const Vp8FrameHeader* frame_hdr,
                     const scoped_refptr<VP8Picture>& last_frame,
                     const scoped_refptr<VP8Picture>& golden_frame,
                     const scoped_refptr<VP8Picture>& alt_frame) override;
@@ -392,7 +392,7 @@
       input_buffer_queued_count_(0),
       output_streamon_(false),
       output_buffer_queued_count_(0),
-      video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
+      video_profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
       output_format_fourcc_(0),
       state_(kUninitialized),
       output_mode_(Config::OutputMode::ALLOCATE),
@@ -470,12 +470,11 @@
 
   video_profile_ = config.profile;
 
-  if (video_profile_ >= media::H264PROFILE_MIN &&
-      video_profile_ <= media::H264PROFILE_MAX) {
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
     h264_accelerator_.reset(new V4L2H264Accelerator(this));
     decoder_.reset(new H264Decoder(h264_accelerator_.get()));
-  } else if (video_profile_ >= media::VP8PROFILE_MIN &&
-             video_profile_ <= media::VP8PROFILE_MAX) {
+  } else if (video_profile_ >= VP8PROFILE_MIN &&
+             video_profile_ <= VP8PROFILE_MAX) {
     vp8_accelerator_.reset(new V4L2VP8Accelerator(this));
     decoder_.reset(new VP8Decoder(vp8_accelerator_.get()));
   } else {
@@ -1241,7 +1240,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::Decode(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   DVLOGF(3) << "input_id=" << bitstream_buffer.id()
             << ", size=" << bitstream_buffer.size();
   DCHECK(decode_task_runner_->BelongsToCurrentThread());
@@ -1260,7 +1259,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::DecodeTask(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   DVLOGF(3) << "input_id=" << bitstream_buffer.id()
             << " size=" << bitstream_buffer.size();
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
@@ -1495,7 +1494,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
-    const std::vector<media::PictureBuffer>& buffers) {
+    const std::vector<PictureBuffer>& buffers) {
   DVLOGF(3);
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
@@ -1506,7 +1505,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask(
-    const std::vector<media::PictureBuffer>& buffers) {
+    const std::vector<PictureBuffer>& buffers) {
   DVLOGF(3);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(state_, kAwaitingPictureBuffers);
@@ -2056,8 +2055,8 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitFrameMetadata(
-    const media::H264SPS* sps,
-    const media::H264PPS* pps,
+    const H264SPS* sps,
+    const H264PPS* pps,
     const H264DPB& dpb,
     const H264Picture::Vector& ref_pic_listp0,
     const H264Picture::Vector& ref_pic_listb0,
@@ -2215,8 +2214,8 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitSlice(
-    const media::H264PPS* pps,
-    const media::H264SliceHeader* slice_hdr,
+    const H264PPS* pps,
+    const H264SliceHeader* slice_hdr,
     const H264Picture::Vector& ref_pic_list0,
     const H264Picture::Vector& ref_pic_list1,
     const scoped_refptr<H264Picture>& pic,
@@ -2446,7 +2445,7 @@
   } while (0)
 
 static void FillV4L2SegmentationHeader(
-    const media::Vp8SegmentationHeader& vp8_sgmnt_hdr,
+    const Vp8SegmentationHeader& vp8_sgmnt_hdr,
     struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) {
 #define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \
   v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0)
@@ -2468,7 +2467,7 @@
 }
 
 static void FillV4L2LoopfilterHeader(
-    const media::Vp8LoopFilterHeader& vp8_loopfilter_hdr,
+    const Vp8LoopFilterHeader& vp8_loopfilter_hdr,
     struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) {
 #define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \
   v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0)
@@ -2490,7 +2489,7 @@
 }
 
 static void FillV4L2QuantizationHeader(
-    const media::Vp8QuantizationHeader& vp8_quant_hdr,
+    const Vp8QuantizationHeader& vp8_quant_hdr,
     struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) {
   v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi;
   v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta;
@@ -2501,7 +2500,7 @@
 }
 
 static void FillV4L2EntropyHeader(
-    const media::Vp8EntropyHeader& vp8_entropy_hdr,
+    const Vp8EntropyHeader& vp8_entropy_hdr,
     struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) {
   ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->coeff_probs,
                        vp8_entropy_hdr.coeff_probs);
@@ -2514,7 +2513,7 @@
 
 bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode(
     const scoped_refptr<VP8Picture>& pic,
-    const media::Vp8FrameHeader* frame_hdr,
+    const Vp8FrameHeader* frame_hdr,
     const scoped_refptr<VP8Picture>& last_frame,
     const scoped_refptr<VP8Picture>& golden_frame,
     const scoped_refptr<VP8Picture>& alt_frame) {
@@ -2698,8 +2697,8 @@
   // TODO(posciak): Use visible size from decoder here instead
   // (crbug.com/402760). Passing (0, 0) results in the client using the
   // visible size extracted from the container instead.
-  media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
-                         gfx::Rect(0, 0), false);
+  Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
+                  gfx::Rect(0, 0), false);
   DVLOGF(3) << dec_surface->ToString()
             << ", bitstream_id: " << picture.bitstream_buffer_id()
             << ", picture_id: " << picture.picture_buffer_id();
@@ -2742,7 +2741,7 @@
   bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_);
   while (!pending_picture_ready_.empty()) {
     bool cleared = pending_picture_ready_.front().cleared;
-    const media::Picture& picture = pending_picture_ready_.front().picture;
+    const Picture& picture = pending_picture_ready_.front().picture;
     if (cleared && picture_clearing_count_ == 0) {
       DVLOGF(4) << "Posting picture ready to decode task runner for: "
                 << picture.picture_buffer_id();
@@ -2798,7 +2797,7 @@
 }
 
 // static
-media::VideoDecodeAccelerator::SupportedProfiles
+VideoDecodeAccelerator::SupportedProfiles
 V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() {
   scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
   if (!device)
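Initialize() above leans on the fact that VideoCodecProfile lays each codec's profiles out contiguously between the *PROFILE_MIN and *PROFILE_MAX constants, so codec selection reduces to a pair of range checks. A sketch of that idiom — the helper itself is hypothetical, not part of this patch:

  // Hypothetical helper illustrating the MIN/MAX range idiom Initialize()
  // uses to pick a codec-specific accelerator.
  VideoCodec CodecForProfile(VideoCodecProfile profile) {
    if (profile >= H264PROFILE_MIN && profile <= H264PROFILE_MAX)
      return kCodecH264;
    if (profile >= VP8PROFILE_MIN && profile <= VP8PROFILE_MAX)
      return kCodecVP8;
    if (profile >= VP9PROFILE_MIN && profile <= VP9PROFILE_MAX)
      return kCodecVP9;
    return kUnknownVideoCodec;  // caller reports the profile as unsupported
  }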
diff --git a/media/gpu/v4l2_slice_video_decode_accelerator.h b/media/gpu/v4l2_slice_video_decode_accelerator.h
index 0ccbcc1..b0131d9a 100644
--- a/media/gpu/v4l2_slice_video_decode_accelerator.h
+++ b/media/gpu/v4l2_slice_video_decode_accelerator.h
@@ -34,7 +34,7 @@
 // decoding functionality and requires userspace to provide support for parsing
 // the input stream and managing decoder state across frames.
 class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
-    : public media::VideoDecodeAccelerator {
+    : public VideoDecodeAccelerator {
  public:
   class V4L2DecodeSurface;
 
@@ -45,11 +45,10 @@
       const MakeGLContextCurrentCallback& make_context_current_cb);
   ~V4L2SliceVideoDecodeAccelerator() override;
 
-  // media::VideoDecodeAccelerator implementation.
+  // VideoDecodeAccelerator implementation.
   bool Initialize(const Config& config, Client* client) override;
-  void Decode(const media::BitstreamBuffer& bitstream_buffer) override;
-  void AssignPictureBuffers(
-      const std::vector<media::PictureBuffer>& buffers) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer) override;
+  void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
   void ImportBufferForPicture(
       int32_t picture_buffer_id,
       const gfx::GpuMemoryBufferHandle& gpu_memory_buffer_handle) override;
@@ -62,8 +61,7 @@
       const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
       override;
 
-  static media::VideoDecodeAccelerator::SupportedProfiles
-  GetSupportedProfiles();
+  static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
 
  private:
   class V4L2H264Accelerator;
@@ -229,8 +227,7 @@
 
   // Allocate V4L2 buffers and assign them to |buffers| provided by the client
   // via AssignPictureBuffers() on decoder thread.
-  void AssignPictureBuffersTask(
-      const std::vector<media::PictureBuffer>& buffers);
+  void AssignPictureBuffersTask(const std::vector<PictureBuffer>& buffers);
 
   // Use buffer backed by dmabuf file descriptors in |passed_dmabuf_fds| for the
   // OutputRecord associated with |picture_buffer_id|, taking ownership of the
@@ -307,7 +304,7 @@
   const int kFlushBufferId = -2;
 
   // Handler for Decode() on decoder_thread_.
-  void DecodeTask(const media::BitstreamBuffer& bitstream_buffer);
+  void DecodeTask(const BitstreamBuffer& bitstream_buffer);
 
   // Schedule a new DecodeBufferTask if we are decoding.
   void ScheduleDecodeBufferTaskIfNeeded();
@@ -396,7 +393,7 @@
   // Mapping of int index to an output buffer record.
   std::vector<OutputRecord> output_buffer_map_;
 
-  media::VideoCodecProfile video_profile_;
+  VideoCodecProfile video_profile_;
   uint32_t output_format_fourcc_;
   gfx::Size visible_size_;
   gfx::Size coded_size_;
diff --git a/media/gpu/v4l2_video_decode_accelerator.cc b/media/gpu/v4l2_video_decode_accelerator.cc
index c43a3c0..6e5ee517 100644
--- a/media/gpu/v4l2_video_decode_accelerator.cc
+++ b/media/gpu/v4l2_video_decode_accelerator.cc
@@ -85,10 +85,10 @@
 };
 
 struct V4L2VideoDecodeAccelerator::PictureRecord {
-  PictureRecord(bool cleared, const media::Picture& picture);
+  PictureRecord(bool cleared, const Picture& picture);
   ~PictureRecord();
   bool cleared;  // Whether the texture is cleared and safe to render from.
-  media::Picture picture;  // The decoded picture.
+  Picture picture;  // The decoded picture.
 };
 
 V4L2VideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
@@ -136,9 +136,8 @@
 
 V4L2VideoDecodeAccelerator::OutputRecord::~OutputRecord() {}
 
-V4L2VideoDecodeAccelerator::PictureRecord::PictureRecord(
-    bool cleared,
-    const media::Picture& picture)
+V4L2VideoDecodeAccelerator::PictureRecord::PictureRecord(bool cleared,
+                                                         const Picture& picture)
     : cleared(cleared), picture(picture) {}
 
 V4L2VideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
@@ -172,7 +171,7 @@
       egl_display_(egl_display),
       get_gl_context_cb_(get_gl_context_cb),
       make_context_current_cb_(make_context_current_cb),
-      video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
+      video_profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
       output_format_fourcc_(0),
       egl_image_format_fourcc_(0),
       egl_image_planes_count_(0),
@@ -272,9 +271,8 @@
   sub.type = V4L2_EVENT_SOURCE_CHANGE;
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_SUBSCRIBE_EVENT, &sub);
 
-  if (video_profile_ >= media::H264PROFILE_MIN &&
-      video_profile_ <= media::H264PROFILE_MAX) {
-    decoder_h264_parser_.reset(new media::H264Parser());
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
+    decoder_h264_parser_.reset(new H264Parser());
   }
 
   if (!CreateInputBuffers())
@@ -297,7 +295,7 @@
 }
 
 void V4L2VideoDecodeAccelerator::Decode(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   DVLOG(1) << "Decode(): input_id=" << bitstream_buffer.id()
            << ", size=" << bitstream_buffer.size();
   DCHECK(decode_task_runner_->BelongsToCurrentThread());
@@ -317,7 +315,7 @@
 }
 
 void V4L2VideoDecodeAccelerator::AssignPictureBuffers(
-    const std::vector<media::PictureBuffer>& buffers) {
+    const std::vector<PictureBuffer>& buffers) {
   DVLOG(3) << "AssignPictureBuffers(): buffer_count=" << buffers.size();
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
@@ -521,7 +519,7 @@
 }
 
 // static
-media::VideoDecodeAccelerator::SupportedProfiles
+VideoDecodeAccelerator::SupportedProfiles
 V4L2VideoDecodeAccelerator::GetSupportedProfiles() {
   scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
   if (!device)
@@ -532,7 +530,7 @@
 }
 
 void V4L2VideoDecodeAccelerator::DecodeTask(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   DVLOG(3) << "DecodeTask(): input_id=" << bitstream_buffer.id();
   DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
   DCHECK_NE(decoder_state_, kUninitialized);
@@ -696,13 +694,12 @@
 bool V4L2VideoDecodeAccelerator::AdvanceFrameFragment(const uint8_t* data,
                                                       size_t size,
                                                       size_t* endpos) {
-  if (video_profile_ >= media::H264PROFILE_MIN &&
-      video_profile_ <= media::H264PROFILE_MAX) {
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
     // For H264, we need to feed HW one frame at a time.  This is going to take
     // some parsing of our input stream.
     decoder_h264_parser_->SetStream(data, size);
-    media::H264NALU nalu;
-    media::H264Parser::Result result;
+    H264NALU nalu;
+    H264Parser::Result result;
     *endpos = 0;
 
     // Keep on peeking the next NALs while they don't indicate a frame
@@ -710,17 +707,17 @@
     for (;;) {
       bool end_of_frame = false;
       result = decoder_h264_parser_->AdvanceToNextNALU(&nalu);
-      if (result == media::H264Parser::kInvalidStream ||
-          result == media::H264Parser::kUnsupportedStream)
+      if (result == H264Parser::kInvalidStream ||
+          result == H264Parser::kUnsupportedStream)
         return false;
-      if (result == media::H264Parser::kEOStream) {
+      if (result == H264Parser::kEOStream) {
         // We've reached the end of the buffer before finding a frame boundary.
         decoder_partial_frame_pending_ = true;
         return true;
       }
       switch (nalu.nal_unit_type) {
-        case media::H264NALU::kNonIDRSlice:
-        case media::H264NALU::kIDRSlice:
+        case H264NALU::kNonIDRSlice:
+        case H264NALU::kIDRSlice:
           if (nalu.size < 1)
             return false;
           // For these two, if the "first_mb_in_slice" field is zero, start a
@@ -733,17 +730,17 @@
             break;
           }
           break;
-        case media::H264NALU::kSEIMessage:
-        case media::H264NALU::kSPS:
-        case media::H264NALU::kPPS:
-        case media::H264NALU::kAUD:
-        case media::H264NALU::kEOSeq:
-        case media::H264NALU::kEOStream:
-        case media::H264NALU::kReserved14:
-        case media::H264NALU::kReserved15:
-        case media::H264NALU::kReserved16:
-        case media::H264NALU::kReserved17:
-        case media::H264NALU::kReserved18:
+        case H264NALU::kSEIMessage:
+        case H264NALU::kSPS:
+        case H264NALU::kPPS:
+        case H264NALU::kAUD:
+        case H264NALU::kEOSeq:
+        case H264NALU::kEOStream:
+        case H264NALU::kReserved14:
+        case H264NALU::kReserved15:
+        case H264NALU::kReserved16:
+        case H264NALU::kReserved17:
+        case H264NALU::kReserved18:
           // These unconditionally signal a frame boundary.
           end_of_frame = true;
           break;
@@ -769,8 +766,8 @@
     NOTREACHED();
     return false;
   } else {
-    DCHECK_GE(video_profile_, media::VP8PROFILE_MIN);
-    DCHECK_LE(video_profile_, media::VP9PROFILE_MAX);
+    DCHECK_GE(video_profile_, VP8PROFILE_MIN);
+    DCHECK_LE(video_profile_, VP9PROFILE_MAX);
     // For VP8/9, we can just dump the entire buffer.  No fragmentation needed,
     // and we never return a partial frame.
     *endpos = size;
@@ -1176,26 +1173,24 @@
         for (auto& fd : output_record.fds) {
           fds.push_back(fd.get());
         }
-        scoped_refptr<media::VideoFrame> frame =
-            media::VideoFrame::WrapExternalDmabufs(
-                V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_),
-                coded_size_, gfx::Rect(visible_size_), visible_size_, fds,
-                base::TimeDelta());
+        scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalDmabufs(
+            V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_),
+            coded_size_, gfx::Rect(visible_size_), visible_size_, fds,
+            base::TimeDelta());
        // Unretained is safe because |this| owns the image processor, so there
        // will be no callbacks after the processor is destroyed. Also, this
        // class ensures it is safe to post a task from the child thread to the
        // decoder thread using Unretained.
         image_processor_->Process(
             frame, dqbuf.index,
-            media::BindToCurrentLoop(
+            BindToCurrentLoop(
                 base::Bind(&V4L2VideoDecodeAccelerator::FrameProcessed,
                            base::Unretained(this), bitstream_buffer_id)));
       } else {
         output_record.state = kAtClient;
         decoder_frames_at_client_++;
-        const media::Picture picture(output_record.picture_id,
-                                     bitstream_buffer_id,
-                                     gfx::Rect(visible_size_), false);
+        const Picture picture(output_record.picture_id, bitstream_buffer_id,
+                              gfx::Rect(visible_size_), false);
         pending_picture_ready_.push(
             PictureRecord(output_record.cleared, picture));
         SendPictureReady();
@@ -1486,9 +1481,8 @@
     return;
 
   // Reset format-specific bits.
-  if (video_profile_ >= media::H264PROFILE_MIN &&
-      video_profile_ <= media::H264PROFILE_MAX) {
-    decoder_h264_parser_.reset(new media::H264Parser());
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
+    decoder_h264_parser_.reset(new H264Parser());
   }
 
   // Jobs drained, we're finished resetting.
@@ -2159,7 +2153,7 @@
       (decoder_state_ == kResetting || decoder_flushing_);
   while (pending_picture_ready_.size() > 0) {
     bool cleared = pending_picture_ready_.front().cleared;
-    const media::Picture& picture = pending_picture_ready_.front().picture;
+    const Picture& picture = pending_picture_ready_.front().picture;
     if (cleared && picture_clearing_count_ == 0) {
       // This picture is cleared. It can be posted to a thread different than
       // the main GPU thread to reduce latency. This should be the case after
@@ -2222,8 +2216,8 @@
     output_record.state = kAtClient;
     decoder_frames_at_client_++;
     image_processor_bitstream_buffer_ids_.pop();
-    const media::Picture picture(output_record.picture_id, bitstream_buffer_id,
-                                 gfx::Rect(visible_size_), false);
+    const Picture picture(output_record.picture_id, bitstream_buffer_id,
+                          gfx::Rect(visible_size_), false);
     pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
     SendPictureReady();
     output_record.cleared = true;
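AdvanceFrameFragment() in the hunks above finds H.264 frame boundaries by walking NALUs with H264Parser. A condensed sketch of the per-NALU classification it performs — this helper is an illustration only; the real code also tracks partial frames across calls:

  // A NALU starts a new frame either unconditionally (SEI/SPS/PPS/AUD/...)
  // or, for slices, when first_mb_in_slice == 0. That field is Exp-Golomb
  // coded right after the slice NAL header, and a zero value is encoded with
  // a leading '1' bit, so the byte test data[1] >= 0x80 suffices (mirroring
  // the check in the hunk above).
  bool StartsNewFrame(const H264NALU& nalu) {
    switch (nalu.nal_unit_type) {
      case H264NALU::kNonIDRSlice:
      case H264NALU::kIDRSlice:
        return nalu.size >= 1 && nalu.data[1] >= 0x80;
      case H264NALU::kSEIMessage:
      case H264NALU::kSPS:
      case H264NALU::kPPS:
      case H264NALU::kAUD:
        return true;
      default:
        return false;
    }
  }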
diff --git a/media/gpu/v4l2_video_decode_accelerator.h b/media/gpu/v4l2_video_decode_accelerator.h
index 2a0ae89b..a756fb6 100644
--- a/media/gpu/v4l2_video_decode_accelerator.h
+++ b/media/gpu/v4l2_video_decode_accelerator.h
@@ -46,11 +46,11 @@
 // is waited on with epoll().  There are three threads involved in this class:
 //
 // * The child thread, which is the main GPU process thread which calls the
-//   media::VideoDecodeAccelerator entry points.  Calls from this thread
+//   VideoDecodeAccelerator entry points.  Calls from this thread
 //   generally do not block (with the exception of Initialize() and Destroy()).
 //   They post tasks to the decoder_thread_, which actually services the task
 //   and calls back when complete through the
-//   media::VideoDecodeAccelerator::Client interface.
+//   VideoDecodeAccelerator::Client interface.
 // * The decoder_thread_, owned by this class.  It services API tasks, through
 //   the *Task() routines, as well as V4L2 device events, through
 //   ServiceDeviceTask().  Almost all state modification is done on this thread
@@ -85,7 +85,7 @@
//   buffers. We cannot drop any frame during a resolution change, so V4L2VDA
//   should destroy output buffers after the image processor returns all frames.
 class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
-    : public media::VideoDecodeAccelerator {
+    : public VideoDecodeAccelerator {
  public:
   V4L2VideoDecodeAccelerator(
       EGLDisplay egl_display,
@@ -94,12 +94,11 @@
       const scoped_refptr<V4L2Device>& device);
   ~V4L2VideoDecodeAccelerator() override;
 
-  // media::VideoDecodeAccelerator implementation.
+  // VideoDecodeAccelerator implementation.
   // Note: Initialize() and Destroy() are synchronous.
   bool Initialize(const Config& config, Client* client) override;
-  void Decode(const media::BitstreamBuffer& bitstream_buffer) override;
-  void AssignPictureBuffers(
-      const std::vector<media::PictureBuffer>& buffers) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer) override;
+  void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
   void ReusePictureBuffer(int32_t picture_buffer_id) override;
   void Flush() override;
   void Reset() override;
@@ -109,8 +108,7 @@
       const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
       override;
 
-  static media::VideoDecodeAccelerator::SupportedProfiles
-  GetSupportedProfiles();
+  static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
 
  private:
   // These are rather subjectively tuned.
@@ -124,9 +122,9 @@
     kInputBufferMaxSizeFor4k = 4 * kInputBufferMaxSizeFor1080p,
     // Number of output buffers to use for each VDA stage above what's required
     // by the decoder (e.g. DPB size, in H264).  We need
-    // media::limits::kMaxVideoFrames to fill up the GpuVideoDecode pipeline,
+    // limits::kMaxVideoFrames to fill up the GpuVideoDecode pipeline,
     // and +1 for a frame in transit.
-    kDpbOutputBufferExtraCount = media::limits::kMaxVideoFrames + 1,
+    kDpbOutputBufferExtraCount = limits::kMaxVideoFrames + 1,
   };
 
   // Internal state of the decoder.
@@ -195,7 +193,7 @@
   // Enqueue a BitstreamBuffer to decode.  This will enqueue a buffer to the
   // decoder_input_queue_, then queue a DecodeBufferTask() to actually decode
   // the buffer.
-  void DecodeTask(const media::BitstreamBuffer& bitstream_buffer);
+  void DecodeTask(const BitstreamBuffer& bitstream_buffer);
 
   // Decode from the buffers queued in decoder_input_queue_.  Calls
   // DecodeBufferInitial() or DecodeBufferContinue() as appropriate.
@@ -403,7 +401,7 @@
   std::queue<linked_ptr<BitstreamBufferRef>> decoder_input_queue_;
   // For H264 decode, hardware requires that we send it frame-sized chunks.
   // We'll need to parse the stream.
-  std::unique_ptr<media::H264Parser> decoder_h264_parser_;
+  std::unique_ptr<H264Parser> decoder_h264_parser_;
   // Set if the decoder has a pending incomplete frame in an input buffer.
   bool decoder_partial_frame_pending_;
 
@@ -479,7 +477,7 @@
   MakeGLContextCurrentCallback make_context_current_cb_;
 
   // The codec we'll be decoding for.
-  media::VideoCodecProfile video_profile_;
+  VideoCodecProfile video_profile_;
   // Chosen output format.
   uint32_t output_format_fourcc_;
 
diff --git a/media/gpu/v4l2_video_encode_accelerator.cc b/media/gpu/v4l2_video_encode_accelerator.cc
index 9afa645..a297b89 100644
--- a/media/gpu/v4l2_video_encode_accelerator.cc
+++ b/media/gpu/v4l2_video_encode_accelerator.cc
@@ -80,7 +80,7 @@
     const scoped_refptr<V4L2Device>& device)
     : child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
       output_buffer_byte_size_(0),
-      device_input_format_(media::PIXEL_FORMAT_UNKNOWN),
+      device_input_format_(PIXEL_FORMAT_UNKNOWN),
       input_planes_count_(0),
       output_format_fourcc_(0),
       encoder_state_(kUninitialized),
@@ -106,14 +106,13 @@
   DestroyOutputBuffers();
 }
 
-bool V4L2VideoEncodeAccelerator::Initialize(
-    media::VideoPixelFormat input_format,
-    const gfx::Size& input_visible_size,
-    media::VideoCodecProfile output_profile,
-    uint32_t initial_bitrate,
-    Client* client) {
+bool V4L2VideoEncodeAccelerator::Initialize(VideoPixelFormat input_format,
+                                            const gfx::Size& input_visible_size,
+                                            VideoCodecProfile output_profile,
+                                            uint32_t initial_bitrate,
+                                            Client* client) {
   DVLOG(3) << __func__
-           << ": input_format=" << media::VideoPixelFormatToString(input_format)
+           << ": input_format=" << VideoPixelFormatToString(input_format)
            << ", input_visible_size=" << input_visible_size.ToString()
            << ", output_profile=" << output_profile
            << ", initial_bitrate=" << initial_bitrate;
@@ -143,7 +142,7 @@
 
   if (input_format != device_input_format_) {
     DVLOG(1) << "Input format not supported by the HW, will convert to "
-             << media::VideoPixelFormatToString(device_input_format_);
+             << VideoPixelFormatToString(device_input_format_);
 
     scoped_refptr<V4L2Device> device =
         V4L2Device::Create(V4L2Device::kImageProcessor);
@@ -220,9 +219,8 @@
   NOTIFY_ERROR(kPlatformFailureError);
 }
 
-void V4L2VideoEncodeAccelerator::Encode(
-    const scoped_refptr<media::VideoFrame>& frame,
-    bool force_keyframe) {
+void V4L2VideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
+                                        bool force_keyframe) {
   DVLOG(3) << "Encode(): force_keyframe=" << force_keyframe;
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
@@ -251,7 +249,7 @@
 }
 
 void V4L2VideoEncodeAccelerator::UseOutputBitstreamBuffer(
-    const media::BitstreamBuffer& buffer) {
+    const BitstreamBuffer& buffer) {
   DVLOG(3) << "UseOutputBitstreamBuffer(): id=" << buffer.id();
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
@@ -319,7 +317,7 @@
   delete this;
 }
 
-media::VideoEncodeAccelerator::SupportedProfiles
+VideoEncodeAccelerator::SupportedProfiles
 V4L2VideoEncodeAccelerator::GetSupportedProfiles() {
   SupportedProfiles profiles;
   SupportedProfile profile;
@@ -335,21 +333,21 @@
                                     &profile.max_resolution);
     switch (fmtdesc.pixelformat) {
       case V4L2_PIX_FMT_H264:
-        profile.profile = media::H264PROFILE_MAIN;
+        profile.profile = H264PROFILE_MAIN;
         profiles.push_back(profile);
         break;
       case V4L2_PIX_FMT_VP8:
-        profile.profile = media::VP8PROFILE_ANY;
+        profile.profile = VP8PROFILE_ANY;
         profiles.push_back(profile);
         break;
       case V4L2_PIX_FMT_VP9:
-        profile.profile = media::VP9PROFILE_PROFILE0;
+        profile.profile = VP9PROFILE_PROFILE0;
         profiles.push_back(profile);
-        profile.profile = media::VP9PROFILE_PROFILE1;
+        profile.profile = VP9PROFILE_PROFILE1;
         profiles.push_back(profile);
-        profile.profile = media::VP9PROFILE_PROFILE2;
+        profile.profile = VP9PROFILE_PROFILE2;
         profiles.push_back(profile);
-        profile.profile = media::VP9PROFILE_PROFILE3;
+        profile.profile = VP9PROFILE_PROFILE3;
         profiles.push_back(profile);
         break;
     }
@@ -374,15 +372,14 @@
   for (auto& fd : scoped_fds) {
     fds.push_back(fd.get());
   }
-  scoped_refptr<media::VideoFrame> output_frame =
-      media::VideoFrame::WrapExternalDmabufs(
-          device_input_format_, image_processor_->output_allocated_size(),
-          gfx::Rect(visible_size_), visible_size_, fds, timestamp);
+  scoped_refptr<VideoFrame> output_frame = VideoFrame::WrapExternalDmabufs(
+      device_input_format_, image_processor_->output_allocated_size(),
+      gfx::Rect(visible_size_), visible_size_, fds, timestamp);
   if (!output_frame) {
     NOTIFY_ERROR(kPlatformFailureError);
     return;
   }
-  output_frame->AddDestructionObserver(media::BindToCurrentLoop(
+  output_frame->AddDestructionObserver(BindToCurrentLoop(
       base::Bind(&V4L2VideoEncodeAccelerator::ReuseImageProcessorOutputBuffer,
                  weak_this_, output_buffer_index)));
 
@@ -405,7 +402,7 @@
 }
 
 void V4L2VideoEncodeAccelerator::EncodeTask(
-    const scoped_refptr<media::VideoFrame>& frame,
+    const scoped_refptr<VideoFrame>& frame,
     bool force_keyframe) {
   DVLOG(3) << "EncodeTask(): force_keyframe=" << force_keyframe;
   DCHECK_EQ(encoder_thread_.message_loop(), base::MessageLoop::current());
@@ -684,7 +681,7 @@
   DCHECK(!encoder_input_queue_.empty());
 
   // Enqueue an input (VIDEO_OUTPUT) buffer.
-  scoped_refptr<media::VideoFrame> frame = encoder_input_queue_.front();
+  scoped_refptr<VideoFrame> frame = encoder_input_queue_.front();
   const int index = free_input_buffers_.back();
   InputRecord& input_record = input_buffer_map_[index];
   DCHECK(!input_record.at_device);
@@ -703,7 +700,7 @@
   DCHECK_EQ(device_input_format_, frame->format());
   for (size_t i = 0; i < input_planes_count_; ++i) {
     qbuf.m.planes[i].bytesused = base::checked_cast<__u32>(
-        media::VideoFrame::PlaneSize(frame->format(), i, input_allocated_size_)
+        VideoFrame::PlaneSize(frame->format(), i, input_allocated_size_)
             .GetArea());
 
     switch (input_memory_type_) {
@@ -923,7 +920,7 @@
 }
 
 bool V4L2VideoEncodeAccelerator::SetOutputFormat(
-    media::VideoCodecProfile output_profile) {
+    VideoCodecProfile output_profile) {
   DCHECK(child_task_runner_->BelongsToCurrentThread());
   DCHECK(!input_streamon_);
   DCHECK(!output_streamon_);
@@ -957,13 +954,13 @@
 }
 
 bool V4L2VideoEncodeAccelerator::NegotiateInputFormat(
-    media::VideoPixelFormat input_format) {
+    VideoPixelFormat input_format) {
   DVLOG(3) << "NegotiateInputFormat()";
   DCHECK(child_task_runner_->BelongsToCurrentThread());
   DCHECK(!input_streamon_);
   DCHECK(!output_streamon_);
 
-  device_input_format_ = media::PIXEL_FORMAT_UNKNOWN;
+  device_input_format_ = PIXEL_FORMAT_UNKNOWN;
   input_planes_count_ = 0;
 
   uint32_t input_format_fourcc =
@@ -973,7 +970,7 @@
     return false;
   }
 
-  size_t input_planes_count = media::VideoFrame::NumPlanes(input_format);
+  size_t input_planes_count = VideoFrame::NumPlanes(input_format);
   DCHECK_LE(input_planes_count, static_cast<size_t>(VIDEO_MAX_PLANES));
 
  // First see if the device can use the provided input_format directly.
@@ -989,12 +986,12 @@
     input_format_fourcc = device_->PreferredInputFormat();
     input_format =
         V4L2Device::V4L2PixFmtToVideoPixelFormat(input_format_fourcc);
-    if (input_format == media::PIXEL_FORMAT_UNKNOWN) {
+    if (input_format == PIXEL_FORMAT_UNKNOWN) {
       LOG(ERROR) << "Unsupported input format" << input_format_fourcc;
       return false;
     }
 
-    input_planes_count = media::VideoFrame::NumPlanes(input_format);
+    input_planes_count = VideoFrame::NumPlanes(input_format);
     DCHECK_LE(input_planes_count, static_cast<size_t>(VIDEO_MAX_PLANES));
 
     // Device might have adjusted parameters, reset them along with the format.
@@ -1022,9 +1019,8 @@
   return true;
 }
 
-bool V4L2VideoEncodeAccelerator::SetFormats(
-    media::VideoPixelFormat input_format,
-    media::VideoCodecProfile output_profile) {
+bool V4L2VideoEncodeAccelerator::SetFormats(VideoPixelFormat input_format,
+                                            VideoCodecProfile output_profile) {
   DVLOG(3) << "SetFormats()";
   DCHECK(child_task_runner_->BelongsToCurrentThread());
   DCHECK(!input_streamon_);
diff --git a/media/gpu/v4l2_video_encode_accelerator.h b/media/gpu/v4l2_video_encode_accelerator.h
index a57735b..de8d6d2 100644
--- a/media/gpu/v4l2_video_encode_accelerator.h
+++ b/media/gpu/v4l2_video_encode_accelerator.h
@@ -41,22 +41,21 @@
 // format conversion, if the input format requested via Initialize() is not
 // accepted by the hardware codec.
 class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
-    : public media::VideoEncodeAccelerator {
+    : public VideoEncodeAccelerator {
  public:
   explicit V4L2VideoEncodeAccelerator(const scoped_refptr<V4L2Device>& device);
   ~V4L2VideoEncodeAccelerator() override;
 
-  // media::VideoEncodeAccelerator implementation.
-  media::VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles()
-      override;
-  bool Initialize(media::VideoPixelFormat format,
+  // VideoEncodeAccelerator implementation.
+  VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
+  bool Initialize(VideoPixelFormat format,
                   const gfx::Size& input_visible_size,
-                  media::VideoCodecProfile output_profile,
+                  VideoCodecProfile output_profile,
                   uint32_t initial_bitrate,
                   Client* client) override;
-  void Encode(const scoped_refptr<media::VideoFrame>& frame,
+  void Encode(const scoped_refptr<VideoFrame>& frame,
               bool force_keyframe) override;
-  void UseOutputBitstreamBuffer(const media::BitstreamBuffer& buffer) override;
+  void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
   void RequestEncodingParametersChange(uint32_t bitrate,
                                        uint32_t framerate) override;
   void Destroy() override;
@@ -71,7 +70,7 @@
     InputRecord();
     ~InputRecord();
     bool at_device;
-    scoped_refptr<media::VideoFrame> frame;
+    scoped_refptr<VideoFrame> frame;
   };
 
   // Record for output buffers.
@@ -87,7 +86,7 @@
   struct ImageProcessorInputRecord {
     ImageProcessorInputRecord();
     ~ImageProcessorInputRecord();
-    scoped_refptr<media::VideoFrame> frame;
+    scoped_refptr<VideoFrame> frame;
     bool force_keyframe;
   };
 
@@ -124,8 +123,7 @@
   // Encoding tasks, to be run on encode_thread_.
   //
 
-  void EncodeTask(const scoped_refptr<media::VideoFrame>& frame,
-                  bool force_keyframe);
+  void EncodeTask(const scoped_refptr<VideoFrame>& frame, bool force_keyframe);
 
   // Add a BitstreamBuffer to the queue of buffers ready to be used for encoder
   // output.
@@ -178,16 +176,16 @@
                                            uint32_t framerate);
 
   // Set up formats and initialize the device for them.
-  bool SetFormats(media::VideoPixelFormat input_format,
-                  media::VideoCodecProfile output_profile);
+  bool SetFormats(VideoPixelFormat input_format,
+                  VideoCodecProfile output_profile);
 
   // Try to set up the device to the input format we were Initialized() with,
   // or if the device doesn't support it, use one it can support, so that we
   // can later instantiate a V4L2ImageProcessor to convert to it.
-  bool NegotiateInputFormat(media::VideoPixelFormat input_format);
+  bool NegotiateInputFormat(VideoPixelFormat input_format);
 
   // Set up the device to the output format requested in Initialize().
-  bool SetOutputFormat(media::VideoCodecProfile output_profile);
+  bool SetOutputFormat(VideoCodecProfile output_profile);
 
   // Initialize device controls with default values.
   bool InitControls();
@@ -215,7 +213,7 @@
   size_t output_buffer_byte_size_;
 
   // Formats for input frames and the output stream.
-  media::VideoPixelFormat device_input_format_;
+  VideoPixelFormat device_input_format_;
   size_t input_planes_count_;
   uint32_t output_format_fourcc_;
 
@@ -235,7 +233,7 @@
   size_t stream_header_size_;
 
   // Video frames ready to be encoded.
-  std::queue<scoped_refptr<media::VideoFrame>> encoder_input_queue_;
+  std::queue<scoped_refptr<VideoFrame>> encoder_input_queue_;
 
   // Encoder device.
   scoped_refptr<V4L2Device> device_;
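NegotiateInputFormat(), declared above, implements the try-then-fallback described in the class comment: probe the device with the requested format and, if it refuses, switch to its preferred format and interpose a V4L2ImageProcessor. Stripped to raw V4L2 — the class goes through its V4L2Device wrapper instead, and this helper is illustrative only:

  #include <cstring>
  #include <linux/videodev2.h>
  #include <sys/ioctl.h>

  // Illustrative probe: does the device accept |fourcc| on its input
  // (VIDEO_OUTPUT) side? The driver may silently adjust the request, so only
  // a round-trip that preserves the fourcc counts as acceptance; otherwise
  // the caller falls back to the device's preferred format plus a converter.
  bool DeviceAcceptsInputFormat(int device_fd, uint32_t fourcc,
                                int width, int height) {
    struct v4l2_format format;
    memset(&format, 0, sizeof(format));
    format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    format.fmt.pix_mp.pixelformat = fourcc;
    format.fmt.pix_mp.width = width;
    format.fmt.pix_mp.height = height;
    return ioctl(device_fd, VIDIOC_S_FMT, &format) == 0 &&
           format.fmt.pix_mp.pixelformat == fourcc;
  }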
diff --git a/media/gpu/vaapi_jpeg_decode_accelerator.cc b/media/gpu/vaapi_jpeg_decode_accelerator.cc
index 75c41c2..0303d7d 100644
--- a/media/gpu/vaapi_jpeg_decode_accelerator.cc
+++ b/media/gpu/vaapi_jpeg_decode_accelerator.cc
@@ -37,7 +37,7 @@
 }
 
 static unsigned int VaSurfaceFormatForJpeg(
-    const media::JpegFrameHeader& frame_header) {
+    const JpegFrameHeader& frame_header) {
  // The range of each sampling factor is [1, 4]. Pack them into an integer to
  // make the matching code simpler. For example, 0x211 means the sampling
  // factors are 2, 1, 1 for the 3 components.
@@ -80,7 +80,7 @@
 VaapiJpegDecodeAccelerator::DecodeRequest::DecodeRequest(
     int32_t bitstream_buffer_id,
     std::unique_ptr<SharedMemoryRegion> shm,
-    const scoped_refptr<media::VideoFrame>& video_frame)
+    const scoped_refptr<VideoFrame>& video_frame)
     : bitstream_buffer_id(bitstream_buffer_id),
       shm(std::move(shm)),
       video_frame(video_frame) {}
@@ -153,7 +153,7 @@
 bool VaapiJpegDecodeAccelerator::OutputPicture(
     VASurfaceID va_surface_id,
     int32_t input_buffer_id,
-    const scoped_refptr<media::VideoFrame>& video_frame) {
+    const scoped_refptr<VideoFrame>& video_frame) {
   DCHECK(decoder_task_runner_->BelongsToCurrentThread());
 
   TRACE_EVENT1("jpeg", "VaapiJpegDecodeAccelerator::OutputPicture",
@@ -191,12 +191,12 @@
   size_t src_y_stride = image.pitches[0];
   size_t src_u_stride = image.pitches[1];
   size_t src_v_stride = image.pitches[2];
-  uint8_t* dst_y = video_frame->data(media::VideoFrame::kYPlane);
-  uint8_t* dst_u = video_frame->data(media::VideoFrame::kUPlane);
-  uint8_t* dst_v = video_frame->data(media::VideoFrame::kVPlane);
-  size_t dst_y_stride = video_frame->stride(media::VideoFrame::kYPlane);
-  size_t dst_u_stride = video_frame->stride(media::VideoFrame::kUPlane);
-  size_t dst_v_stride = video_frame->stride(media::VideoFrame::kVPlane);
+  uint8_t* dst_y = video_frame->data(VideoFrame::kYPlane);
+  uint8_t* dst_u = video_frame->data(VideoFrame::kUPlane);
+  uint8_t* dst_v = video_frame->data(VideoFrame::kVPlane);
+  size_t dst_y_stride = video_frame->stride(VideoFrame::kYPlane);
+  size_t dst_u_stride = video_frame->stride(VideoFrame::kUPlane);
+  size_t dst_v_stride = video_frame->stride(VideoFrame::kVPlane);
 
   if (libyuv::I420Copy(src_y, src_y_stride,  // Y
                        src_u, src_u_stride,  // U
@@ -224,8 +224,8 @@
   DCHECK(decoder_task_runner_->BelongsToCurrentThread());
   TRACE_EVENT0("jpeg", "DecodeTask");
 
-  media::JpegParseResult parse_result;
-  if (!media::ParseJpegPicture(
+  JpegParseResult parse_result;
+  if (!ParseJpegPicture(
           reinterpret_cast<const uint8_t*>(request->shm->memory()),
           request->shm->size(), &parse_result)) {
     DLOG(ERROR) << "ParseJpegPicture failed";
@@ -282,8 +282,8 @@
 }
 
 void VaapiJpegDecodeAccelerator::Decode(
-    const media::BitstreamBuffer& bitstream_buffer,
-    const scoped_refptr<media::VideoFrame>& video_frame) {
+    const BitstreamBuffer& bitstream_buffer,
+    const scoped_refptr<VideoFrame>& video_frame) {
   DVLOG(3) << __func__;
   DCHECK(io_task_runner_->BelongsToCurrentThread());
   TRACE_EVENT1("jpeg", "Decode", "input_id", bitstream_buffer.id());
diff --git a/media/gpu/vaapi_jpeg_decode_accelerator.h b/media/gpu/vaapi_jpeg_decode_accelerator.h
index 353d657..efa4e6391 100644
--- a/media/gpu/vaapi_jpeg_decode_accelerator.h
+++ b/media/gpu/vaapi_jpeg_decode_accelerator.h
@@ -34,16 +34,16 @@
 // stopped during |this->Destroy()|, so any tasks posted to the decoder thread
 // can assume |*this| is still alive.  See |weak_this_| below for more details.
 class MEDIA_GPU_EXPORT VaapiJpegDecodeAccelerator
-    : public media::JpegDecodeAccelerator {
+    : public JpegDecodeAccelerator {
  public:
   VaapiJpegDecodeAccelerator(
       const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
   ~VaapiJpegDecodeAccelerator() override;
 
-  // media::JpegDecodeAccelerator implementation.
-  bool Initialize(media::JpegDecodeAccelerator::Client* client) override;
-  void Decode(const media::BitstreamBuffer& bitstream_buffer,
-              const scoped_refptr<media::VideoFrame>& video_frame) override;
+  // JpegDecodeAccelerator implementation.
+  bool Initialize(JpegDecodeAccelerator::Client* client) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer,
+              const scoped_refptr<VideoFrame>& video_frame) override;
   bool IsSupported() override;
 
  private:
@@ -52,12 +52,12 @@
   struct DecodeRequest {
     DecodeRequest(int32_t bitstream_buffer_id,
                   std::unique_ptr<SharedMemoryRegion> shm,
-                  const scoped_refptr<media::VideoFrame>& video_frame);
+                  const scoped_refptr<VideoFrame>& video_frame);
     ~DecodeRequest();
 
     int32_t bitstream_buffer_id;
     std::unique_ptr<SharedMemoryRegion> shm;
-    scoped_refptr<media::VideoFrame> video_frame;
+    scoped_refptr<VideoFrame> video_frame;
   };
 
   // Notifies the client that an error has occurred and decoding cannot
@@ -74,7 +74,7 @@
   // client for output.
   bool OutputPicture(VASurfaceID va_surface_id,
                      int32_t input_buffer_id,
-                     const scoped_refptr<media::VideoFrame>& video_frame);
+                     const scoped_refptr<VideoFrame>& video_frame);
 
   // ChildThread's task runner.
   scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
diff --git a/media/gpu/vaapi_jpeg_decoder.cc b/media/gpu/vaapi_jpeg_decoder.cc
index 8c8afcb..8d7f558 100644
--- a/media/gpu/vaapi_jpeg_decoder.cc
+++ b/media/gpu/vaapi_jpeg_decoder.cc
@@ -82,7 +82,7 @@
 
 // VAAPI only supports a subset of JPEG profiles. This function determines
 // whether a given parsed JPEG result is supported or not.
-static bool IsVaapiSupportedJpeg(const media::JpegParseResult& jpeg) {
+static bool IsVaapiSupportedJpeg(const JpegParseResult& jpeg) {
   if (jpeg.frame_header.visible_width < 1 ||
       jpeg.frame_header.visible_height < 1) {
     DLOG(ERROR) << "width(" << jpeg.frame_header.visible_width
@@ -132,7 +132,7 @@
 }
 
 static void FillPictureParameters(
-    const media::JpegFrameHeader& frame_header,
+    const JpegFrameHeader& frame_header,
     VAPictureParameterBufferJPEGBaseline* pic_param) {
   memset(pic_param, 0, sizeof(*pic_param));
   pic_param->picture_width = frame_header.coded_width;
@@ -150,13 +150,13 @@
   }
 }
 
-static void FillIQMatrix(const media::JpegQuantizationTable* q_table,
+static void FillIQMatrix(const JpegQuantizationTable* q_table,
                          VAIQMatrixBufferJPEGBaseline* iq_matrix) {
   memset(iq_matrix, 0, sizeof(*iq_matrix));
-  static_assert(media::kJpegMaxQuantizationTableNum ==
+  static_assert(kJpegMaxQuantizationTableNum ==
                     arraysize(iq_matrix->load_quantiser_table),
                 "max number of quantization table mismatched");
-  for (size_t i = 0; i < media::kJpegMaxQuantizationTableNum; i++) {
+  for (size_t i = 0; i < kJpegMaxQuantizationTableNum; i++) {
     if (!q_table[i].valid)
       continue;
     iq_matrix->load_quantiser_table[i] = 1;
@@ -168,13 +168,13 @@
   }
 }
 
-static void FillHuffmanTable(const media::JpegHuffmanTable* dc_table,
-                             const media::JpegHuffmanTable* ac_table,
+static void FillHuffmanTable(const JpegHuffmanTable* dc_table,
+                             const JpegHuffmanTable* ac_table,
                              VAHuffmanTableBufferJPEGBaseline* huffman_table) {
   memset(huffman_table, 0, sizeof(*huffman_table));
   // Use default huffman tables if not specified in header.
   bool has_huffman_table = false;
-  for (size_t i = 0; i < media::kJpegMaxHuffmanTableNumBaseline; i++) {
+  for (size_t i = 0; i < kJpegMaxHuffmanTableNumBaseline; i++) {
     if (dc_table[i].valid || ac_table[i].valid) {
       has_huffman_table = true;
       break;
@@ -185,7 +185,7 @@
     ac_table = kDefaultAcTable;
   }
 
-  static_assert(media::kJpegMaxHuffmanTableNumBaseline ==
+  static_assert(kJpegMaxHuffmanTableNumBaseline ==
                     arraysize(huffman_table->load_huffman_table),
                 "max number of huffman table mismatched");
   static_assert(sizeof(huffman_table->huffman_table[0].num_dc_codes) ==
@@ -194,7 +194,7 @@
   static_assert(sizeof(huffman_table->huffman_table[0].dc_values[0]) ==
                     sizeof(dc_table[0].code_value[0]),
                 "size of huffman table code value mismatch");
-  for (size_t i = 0; i < media::kJpegMaxHuffmanTableNumBaseline; i++) {
+  for (size_t i = 0; i < kJpegMaxHuffmanTableNumBaseline; i++) {
     if (!dc_table[i].valid || !ac_table[i].valid)
       continue;
     huffman_table->load_huffman_table[i] = 1;
@@ -213,7 +213,7 @@
 }
 
 static void FillSliceParameters(
-    const media::JpegParseResult& parse_result,
+    const JpegParseResult& parse_result,
     VASliceParameterBufferJPEGBaseline* slice_param) {
   memset(slice_param, 0, sizeof(*slice_param));
   slice_param->slice_data_size = parse_result.data_size;
@@ -246,7 +246,7 @@
 
 // static
 bool VaapiJpegDecoder::Decode(VaapiWrapper* vaapi_wrapper,
-                              const media::JpegParseResult& parse_result,
+                              const JpegParseResult& parse_result,
                               VASurfaceID va_surface) {
   DCHECK_NE(va_surface, VA_INVALID_SURFACE);
   if (!IsVaapiSupportedJpeg(parse_result))
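FillIQMatrix() and FillHuffmanTable() above mirror parser-side tables into libva structs, pinning the two layouts together with static_assert over arraysize so that a header change breaks the build rather than the memcpy. The pattern in isolation — the types here are stand-ins, not the real media/libva ones:

  #include <stdint.h>
  #include <string.h>
  #include "base/macros.h"  // for arraysize()

  struct ParsedTable { uint8_t values[64]; };  // stand-in for parser output
  struct DriverTable { uint8_t table[64]; };   // stand-in for the VA struct

  void FillDriverTable(const ParsedTable& in, DriverTable* out) {
    // Compile-time guard: if either side's table ever changes size, this
    // fails to build instead of silently over- or under-copying.
    static_assert(arraysize(out->table) == arraysize(in.values),
                  "parser and driver table sizes must match");
    memcpy(out->table, in.values, sizeof(out->table));
  }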
diff --git a/media/gpu/vaapi_jpeg_decoder.h b/media/gpu/vaapi_jpeg_decoder.h
index 1db06820..77865b6 100644
--- a/media/gpu/vaapi_jpeg_decoder.h
+++ b/media/gpu/vaapi_jpeg_decoder.h
@@ -10,17 +10,15 @@
 #include "media/gpu/vaapi_wrapper.h"
 
 namespace media {
-struct JpegParseResult;
-}  // namespace media
 
-namespace media {
+struct JpegParseResult;
 
 // A JPEG decoder that utilizes VA-API hardware video decode acceleration on
 // Intel systems. Provides functionality to allow plugging VAAPI HW
 // acceleration into the JpegDecodeAccelerator framework.
 //
 // Clients of this class are expected to manage VA surfaces created via
-// VaapiWrapper, parse JPEG picture via media::ParseJpegPicture, and then pass
+// VaapiWrapper, parse JPEG pictures via ParseJpegPicture, and then pass
 // them to this class.
 class MEDIA_GPU_EXPORT VaapiJpegDecoder {
  public:
@@ -33,7 +31,7 @@
   // |va_surface| should be created with size at least as large as the picture
   // size.
   static bool Decode(VaapiWrapper* vaapi_wrapper,
-                     const media::JpegParseResult& parse_result,
+                     const JpegParseResult& parse_result,
                      VASurfaceID va_surface);
 
  private:
diff --git a/media/gpu/vaapi_jpeg_decoder_unittest.cc b/media/gpu/vaapi_jpeg_decoder_unittest.cc
index d0d45e2..44ba749 100644
--- a/media/gpu/vaapi_jpeg_decoder_unittest.cc
+++ b/media/gpu/vaapi_jpeg_decoder_unittest.cc
@@ -43,7 +43,7 @@
                                     VAProfileJPEGBaseline, report_error_cb);
     ASSERT_TRUE(wrapper_);
 
-    base::FilePath input_file = media::GetTestDataFilePath(kTestFilename);
+    base::FilePath input_file = GetTestDataFilePath(kTestFilename);
 
     ASSERT_TRUE(base::ReadFileToString(input_file, &jpeg_data_))
         << "failed to read input data from " << input_file.value();
@@ -51,7 +51,7 @@
 
   void TearDown() override { wrapper_ = nullptr; }
 
-  bool VerifyDecode(const media::JpegParseResult& parse_result,
+  bool VerifyDecode(const JpegParseResult& parse_result,
                     const std::string& md5sum);
 
  protected:
@@ -59,9 +59,8 @@
   std::string jpeg_data_;
 };
 
-bool VaapiJpegDecoderTest::VerifyDecode(
-    const media::JpegParseResult& parse_result,
-    const std::string& expected_md5sum) {
+bool VaapiJpegDecoderTest::VerifyDecode(const JpegParseResult& parse_result,
+                                        const std::string& expected_md5sum) {
   gfx::Size size(parse_result.frame_header.coded_width,
                  parse_result.frame_header.coded_height);
 
@@ -90,9 +89,8 @@
   }
   EXPECT_EQ(kI420Fourcc, image.format.fourcc);
 
-  base::StringPiece result(
-      reinterpret_cast<const char*>(mem),
-      media::VideoFrame::AllocationSize(media::PIXEL_FORMAT_I420, size));
+  base::StringPiece result(reinterpret_cast<const char*>(mem),
+                           VideoFrame::AllocationSize(PIXEL_FORMAT_I420, size));
   EXPECT_EQ(expected_md5sum, base::MD5String(result));
 
   wrapper_->ReturnVaImage(&image);
@@ -101,19 +99,19 @@
 }
 
 TEST_F(VaapiJpegDecoderTest, DecodeSuccess) {
-  media::JpegParseResult parse_result;
-  ASSERT_TRUE(media::ParseJpegPicture(
-      reinterpret_cast<const uint8_t*>(jpeg_data_.data()), jpeg_data_.size(),
-      &parse_result));
+  JpegParseResult parse_result;
+  ASSERT_TRUE(
+      ParseJpegPicture(reinterpret_cast<const uint8_t*>(jpeg_data_.data()),
+                       jpeg_data_.size(), &parse_result));
 
   EXPECT_TRUE(VerifyDecode(parse_result, kExpectedMd5Sum));
 }
 
 TEST_F(VaapiJpegDecoderTest, DecodeFail) {
-  media::JpegParseResult parse_result;
-  ASSERT_TRUE(media::ParseJpegPicture(
-      reinterpret_cast<const uint8_t*>(jpeg_data_.data()), jpeg_data_.size(),
-      &parse_result));
+  JpegParseResult parse_result;
+  ASSERT_TRUE(
+      ParseJpegPicture(reinterpret_cast<const uint8_t*>(jpeg_data_.data()),
+                       jpeg_data_.size(), &parse_result));
 
   // Not supported by VAAPI.
   parse_result.frame_header.num_components = 1;
diff --git a/media/gpu/vaapi_video_decode_accelerator.cc b/media/gpu/vaapi_video_decode_accelerator.cc
index 045ab17..06fb40f 100644
--- a/media/gpu/vaapi_video_decode_accelerator.cc
+++ b/media/gpu/vaapi_video_decode_accelerator.cc
@@ -117,16 +117,16 @@
   // H264Decoder::H264Accelerator implementation.
   scoped_refptr<H264Picture> CreateH264Picture() override;
 
-  bool SubmitFrameMetadata(const media::H264SPS* sps,
-                           const media::H264PPS* pps,
+  bool SubmitFrameMetadata(const H264SPS* sps,
+                           const H264PPS* pps,
                            const H264DPB& dpb,
                            const H264Picture::Vector& ref_pic_listp0,
                            const H264Picture::Vector& ref_pic_listb0,
                            const H264Picture::Vector& ref_pic_listb1,
                            const scoped_refptr<H264Picture>& pic) override;
 
-  bool SubmitSlice(const media::H264PPS* pps,
-                   const media::H264SliceHeader* slice_hdr,
+  bool SubmitSlice(const H264PPS* pps,
+                   const H264SliceHeader* slice_hdr,
                    const H264Picture::Vector& ref_pic_list0,
                    const H264Picture::Vector& ref_pic_list1,
                    const scoped_refptr<H264Picture>& pic,
@@ -190,7 +190,7 @@
   scoped_refptr<VP8Picture> CreateVP8Picture() override;
 
   bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
-                    const media::Vp8FrameHeader* frame_hdr,
+                    const Vp8FrameHeader* frame_hdr,
                     const scoped_refptr<VP8Picture>& last_frame,
                     const scoped_refptr<VP8Picture>& golden_frame,
                     const scoped_refptr<VP8Picture>& alt_frame) override;
@@ -245,8 +245,8 @@
 
   bool SubmitDecode(
       const scoped_refptr<VP9Picture>& pic,
-      const media::Vp9Segmentation& seg,
-      const media::Vp9LoopFilter& lf,
+      const Vp9Segmentation& seg,
+      const Vp9LoopFilter& lf,
       const std::vector<scoped_refptr<VP9Picture>>& ref_pictures) override;
 
   bool OutputPicture(const scoped_refptr<VP9Picture>& pic) override;
@@ -313,7 +313,7 @@
       bind_image_cb_(bind_image_cb),
       weak_this_factory_(this) {
   weak_this_ = weak_this_factory_.GetWeakPtr();
-  va_surface_release_cb_ = media::BindToCurrentLoop(
+  va_surface_release_cb_ = BindToCurrentLoop(
       base::Bind(&VaapiVideoDecodeAccelerator::RecycleVASurfaceID, weak_this_));
 }
 
@@ -338,7 +338,7 @@
   client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
   client_ = client_ptr_factory_->GetWeakPtr();
 
-  media::VideoCodecProfile profile = config.profile;
+  VideoCodecProfile profile = config.profile;
 
   base::AutoLock auto_lock(lock_);
   DCHECK_EQ(state_, kUninitialized);
@@ -366,16 +366,14 @@
     return false;
   }
 
-  if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) {
+  if (profile >= H264PROFILE_MIN && profile <= H264PROFILE_MAX) {
     h264_accelerator_.reset(
         new VaapiH264Accelerator(this, vaapi_wrapper_.get()));
     decoder_.reset(new H264Decoder(h264_accelerator_.get()));
-  } else if (profile >= media::VP8PROFILE_MIN &&
-             profile <= media::VP8PROFILE_MAX) {
+  } else if (profile >= VP8PROFILE_MIN && profile <= VP8PROFILE_MAX) {
     vp8_accelerator_.reset(new VaapiVP8Accelerator(this, vaapi_wrapper_.get()));
     decoder_.reset(new VP8Decoder(vp8_accelerator_.get()));
-  } else if (profile >= media::VP9PROFILE_MIN &&
-             profile <= media::VP9PROFILE_MAX) {
+  } else if (profile >= VP9PROFILE_MIN && profile <= VP9PROFILE_MAX) {
     vp9_accelerator_.reset(new VaapiVP9Accelerator(this, vaapi_wrapper_.get()));
     decoder_.reset(new VP9Decoder(vp9_accelerator_.get()));
   } else {
@@ -418,8 +416,8 @@
   // (crbug.com/402760). Passing (0, 0) results in the client using the
   // visible size extracted from the container instead.
   if (client_)
-    client_->PictureReady(media::Picture(output_id, input_id, gfx::Rect(0, 0),
-                                         picture->AllowOverlay()));
+    client_->PictureReady(
+        Picture(output_id, input_id, gfx::Rect(0, 0), picture->AllowOverlay()));
 }
 
 void VaapiVideoDecodeAccelerator::TryOutputSurface() {
@@ -446,7 +444,7 @@
 }
 
 void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   DCHECK_EQ(message_loop_, base::MessageLoop::current());
   TRACE_EVENT1("Video Decoder", "MapAndQueueNewInputBuffer", "input_id",
                bitstream_buffer.id());
@@ -697,7 +695,7 @@
 }
 
 void VaapiVideoDecodeAccelerator::Decode(
-    const media::BitstreamBuffer& bitstream_buffer) {
+    const BitstreamBuffer& bitstream_buffer) {
   DCHECK_EQ(message_loop_, base::MessageLoop::current());
 
   TRACE_EVENT1("Video Decoder", "VAVDA::Decode", "Buffer id",
@@ -749,7 +747,7 @@
 }
 
 void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
-    const std::vector<media::PictureBuffer>& buffers) {
+    const std::vector<PictureBuffer>& buffers) {
   DCHECK_EQ(message_loop_, base::MessageLoop::current());
 
   base::AutoLock auto_lock(lock_);
@@ -1141,8 +1139,8 @@
 }
 
 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitFrameMetadata(
-    const media::H264SPS* sps,
-    const media::H264PPS* pps,
+    const H264SPS* sps,
+    const H264PPS* pps,
     const H264DPB& dpb,
     const H264Picture::Vector& ref_pic_listp0,
     const H264Picture::Vector& ref_pic_listb0,
@@ -1254,8 +1252,8 @@
 }
 
 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitSlice(
-    const media::H264PPS* pps,
-    const media::H264SliceHeader* slice_hdr,
+    const H264PPS* pps,
+    const H264SliceHeader* slice_hdr,
     const H264Picture::Vector& ref_pic_list0,
     const H264Picture::Vector& ref_pic_list1,
     const scoped_refptr<H264Picture>& pic,
@@ -1471,24 +1469,23 @@
 
 bool VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::SubmitDecode(
     const scoped_refptr<VP8Picture>& pic,
-    const media::Vp8FrameHeader* frame_hdr,
+    const Vp8FrameHeader* frame_hdr,
     const scoped_refptr<VP8Picture>& last_frame,
     const scoped_refptr<VP8Picture>& golden_frame,
     const scoped_refptr<VP8Picture>& alt_frame) {
   VAIQMatrixBufferVP8 iq_matrix_buf;
   memset(&iq_matrix_buf, 0, sizeof(VAIQMatrixBufferVP8));
 
-  const media::Vp8SegmentationHeader& sgmnt_hdr = frame_hdr->segmentation_hdr;
-  const media::Vp8QuantizationHeader& quant_hdr = frame_hdr->quantization_hdr;
-  static_assert(
-      arraysize(iq_matrix_buf.quantization_index) == media::kMaxMBSegments,
-      "incorrect quantization matrix size");
-  for (size_t i = 0; i < media::kMaxMBSegments; ++i) {
+  const Vp8SegmentationHeader& sgmnt_hdr = frame_hdr->segmentation_hdr;
+  const Vp8QuantizationHeader& quant_hdr = frame_hdr->quantization_hdr;
+  static_assert(arraysize(iq_matrix_buf.quantization_index) == kMaxMBSegments,
+                "incorrect quantization matrix size");
+  for (size_t i = 0; i < kMaxMBSegments; ++i) {
     int q = quant_hdr.y_ac_qi;
 
     if (sgmnt_hdr.segmentation_enabled) {
       if (sgmnt_hdr.segment_feature_mode ==
-          media::Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE)
+          Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE)
         q = sgmnt_hdr.quantizer_update_value[i];
       else
         q += sgmnt_hdr.quantizer_update_value[i];
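     // A worked example of the segment quantizer logic above, assuming a
     // hypothetical frame header with segmentation enabled in delta
     // (non-absolute) mode, y_ac_qi = 40 and
     // quantizer_update_value = {-8, 0, +4, +8}:
     //   effective q per segment = {32, 40, 44, 48}
     // (VP8 quantizer indices are 7-bit values in [0, 127]; in absolute mode
     // the update value replaces y_ac_qi instead of adjusting it.)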
@@ -1513,7 +1510,7 @@
   VAProbabilityDataBufferVP8 prob_buf;
   memset(&prob_buf, 0, sizeof(VAProbabilityDataBufferVP8));
 
-  const media::Vp8EntropyHeader& entr_hdr = frame_hdr->entropy_hdr;
+  const Vp8EntropyHeader& entr_hdr = frame_hdr->entropy_hdr;
   ARRAY_MEMCPY_CHECKED(prob_buf.dct_coeff_probs, entr_hdr.coeff_probs);
 
   if (!vaapi_wrapper_->SubmitBuffer(VAProbabilityBufferType,
@@ -1552,7 +1549,7 @@
 
   pic_param.out_of_loop_frame = VA_INVALID_SURFACE;
 
-  const media::Vp8LoopFilterHeader& lf_hdr = frame_hdr->loopfilter_hdr;
+  const Vp8LoopFilterHeader& lf_hdr = frame_hdr->loopfilter_hdr;
 
 #define FHDR_TO_PP_PF(a, b) pic_param.pic_fields.bits.a = (b)
   FHDR_TO_PP_PF(key_frame, frame_hdr->IsKeyframe() ? 0 : 1);
@@ -1581,7 +1578,7 @@
     int lf_level = lf_hdr.level;
     if (sgmnt_hdr.segmentation_enabled) {
       if (sgmnt_hdr.segment_feature_mode ==
-          media::Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE)
+          Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE)
         lf_level = sgmnt_hdr.lf_update_value[i];
       else
         lf_level += sgmnt_hdr.lf_update_value[i];
@@ -1694,13 +1691,13 @@
 
 bool VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::SubmitDecode(
     const scoped_refptr<VP9Picture>& pic,
-    const media::Vp9Segmentation& seg,
-    const media::Vp9LoopFilter& lf,
+    const Vp9Segmentation& seg,
+    const Vp9LoopFilter& lf,
     const std::vector<scoped_refptr<VP9Picture>>& ref_pictures) {
   VADecPictureParameterBufferVP9 pic_param;
   memset(&pic_param, 0, sizeof(pic_param));
 
-  const media::Vp9FrameHeader* frame_hdr = pic->frame_hdr.get();
+  const Vp9FrameHeader* frame_hdr = pic->frame_hdr.get();
   DCHECK(frame_hdr);
 
   if (frame_hdr->profile != 0) {
@@ -1774,19 +1771,18 @@
   slice_param.slice_data_offset = 0;
   slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
 
-  static_assert(arraysize(media::Vp9Segmentation::feature_enabled) ==
+  static_assert(arraysize(Vp9Segmentation::feature_enabled) ==
                     arraysize(slice_param.seg_param),
                 "seg_param array of incorrect size");
   for (size_t i = 0; i < arraysize(slice_param.seg_param); ++i) {
     VASegmentParameterVP9& seg_param = slice_param.seg_param[i];
 #define SEG_TO_SP_SF(a, b) seg_param.segment_flags.fields.a = b
-    SEG_TO_SP_SF(
-        segment_reference_enabled,
-        seg.FeatureEnabled(i, media::Vp9Segmentation::SEG_LVL_REF_FRAME));
+    SEG_TO_SP_SF(segment_reference_enabled,
+                 seg.FeatureEnabled(i, Vp9Segmentation::SEG_LVL_REF_FRAME));
     SEG_TO_SP_SF(segment_reference,
-                 seg.FeatureData(i, media::Vp9Segmentation::SEG_LVL_REF_FRAME));
+                 seg.FeatureData(i, Vp9Segmentation::SEG_LVL_REF_FRAME));
     SEG_TO_SP_SF(segment_reference_skipped,
-                 seg.FeatureEnabled(i, media::Vp9Segmentation::SEG_LVL_SKIP));
+                 seg.FeatureEnabled(i, Vp9Segmentation::SEG_LVL_SKIP));
 #undef SEG_TO_SP_SF
 
     ARRAY_MEMCPY_CHECKED(seg_param.filter_level, lf.lvl[i]);
@@ -1830,7 +1826,7 @@
 }
 
 // static
-media::VideoDecodeAccelerator::SupportedProfiles
+VideoDecodeAccelerator::SupportedProfiles
 VaapiVideoDecodeAccelerator::GetSupportedProfiles() {
   return VaapiWrapper::GetSupportedDecodeProfiles();
 }
diff --git a/media/gpu/vaapi_video_decode_accelerator.h b/media/gpu/vaapi_video_decode_accelerator.h
index 22ea73a..f9144fc 100644
--- a/media/gpu/vaapi_video_decode_accelerator.h
+++ b/media/gpu/vaapi_video_decode_accelerator.h
@@ -52,7 +52,7 @@
 // stopped during |this->Destroy()|, so any tasks posted to the decoder thread
 // can assume |*this| is still alive.  See |weak_this_| below for more details.
 class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
-    : public media::VideoDecodeAccelerator {
+    : public VideoDecodeAccelerator {
  public:
   class VaapiDecodeSurface;
 
@@ -62,11 +62,10 @@
 
   ~VaapiVideoDecodeAccelerator() override;
 
-  // media::VideoDecodeAccelerator implementation.
+  // VideoDecodeAccelerator implementation.
   bool Initialize(const Config& config, Client* client) override;
-  void Decode(const media::BitstreamBuffer& bitstream_buffer) override;
-  void AssignPictureBuffers(
-      const std::vector<media::PictureBuffer>& buffers) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer) override;
+  void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
 #if defined(USE_OZONE)
   void ImportBufferForPicture(
       int32_t picture_buffer_id,
@@ -81,8 +80,7 @@
       const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
       override;
 
-  static media::VideoDecodeAccelerator::SupportedProfiles
-  GetSupportedProfiles();
+  static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
 
  private:
   class VaapiH264Accelerator;
@@ -94,8 +92,7 @@
 
   // Map the received input buffer into this process' address space and
   // queue it for decode.
-  void MapAndQueueNewInputBuffer(
-      const media::BitstreamBuffer& bitstream_buffer);
+  void MapAndQueueNewInputBuffer(const BitstreamBuffer& bitstream_buffer);
 
   // Get a new input buffer from the queue and set it up in decoder. This will
   // sleep if no input buffers are available. Return true if a new buffer has
diff --git a/media/gpu/vaapi_video_encode_accelerator.cc b/media/gpu/vaapi_video_encode_accelerator.cc
index 01efaad..72542ba 100644
--- a/media/gpu/vaapi_video_encode_accelerator.cc
+++ b/media/gpu/vaapi_video_encode_accelerator.cc
@@ -91,10 +91,9 @@
 }
 
 struct VaapiVideoEncodeAccelerator::InputFrameRef {
-  InputFrameRef(const scoped_refptr<media::VideoFrame>& frame,
-                bool force_keyframe)
+  InputFrameRef(const scoped_refptr<VideoFrame>& frame, bool force_keyframe)
       : frame(frame), force_keyframe(force_keyframe) {}
-  const scoped_refptr<media::VideoFrame> frame;
+  const scoped_refptr<VideoFrame> frame;
   const bool force_keyframe;
 };
 
@@ -105,7 +104,7 @@
   const std::unique_ptr<SharedMemoryRegion> shm;
 };
 
-media::VideoEncodeAccelerator::SupportedProfiles
+VideoEncodeAccelerator::SupportedProfiles
 VaapiVideoEncodeAccelerator::GetSupportedProfiles() {
   return VaapiWrapper::GetSupportedEncodeProfiles();
 }
@@ -123,7 +122,7 @@
 }
 
 VaapiVideoEncodeAccelerator::VaapiVideoEncodeAccelerator()
-    : profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
+    : profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
       mb_width_(0),
       mb_height_(0),
       output_buffer_byte_size_(0),
@@ -154,9 +153,9 @@
 }
 
 bool VaapiVideoEncodeAccelerator::Initialize(
-    media::VideoPixelFormat format,
+    VideoPixelFormat format,
     const gfx::Size& input_visible_size,
-    media::VideoCodecProfile output_profile,
+    VideoCodecProfile output_profile,
     uint32_t initial_bitrate,
     Client* client) {
   DCHECK(child_task_runner_->BelongsToCurrentThread());
@@ -164,7 +163,7 @@
   DCHECK_EQ(state_, kUninitialized);
 
   DVLOGF(1) << "Initializing VAVEA, input_format: "
-            << media::VideoPixelFormatToString(format)
+            << VideoPixelFormatToString(format)
             << ", input_visible_size: " << input_visible_size.ToString()
             << ", output_profile: " << output_profile
             << ", initial_bitrate: " << initial_bitrate;
@@ -188,9 +187,9 @@
     return false;
   }
 
-  if (format != media::PIXEL_FORMAT_I420) {
+  if (format != PIXEL_FORMAT_I420) {
     DVLOGF(1) << "Unsupported input format: "
-              << media::VideoPixelFormatToString(format);
+              << VideoPixelFormatToString(format);
     return false;
   }
 
@@ -234,7 +233,7 @@
   DCHECK_EQ(state_, kUninitialized);
   DVLOGF(4);
 
-  va_surface_release_cb_ = media::BindToCurrentLoop(
+  va_surface_release_cb_ = BindToCurrentLoop(
       base::Bind(&VaapiVideoEncodeAccelerator::RecycleVASurfaceID,
                  base::Unretained(this)));
 
@@ -286,11 +285,11 @@
   }
 
   if (current_pic_->frame_num % i_period_ == 0)
-    current_pic_->type = media::H264SliceHeader::kISlice;
+    current_pic_->type = H264SliceHeader::kISlice;
   else
-    current_pic_->type = media::H264SliceHeader::kPSlice;
+    current_pic_->type = H264SliceHeader::kPSlice;
 
-  if (current_pic_->type != media::H264SliceHeader::kBSlice)
+  if (current_pic_->type != H264SliceHeader::kBSlice)
     current_pic_->ref = true;
 
   current_pic_->pic_order_cnt = current_pic_->frame_num * 2;
@@ -477,7 +476,7 @@
 
 bool VaapiVideoEncodeAccelerator::SubmitHeadersIfNeeded() {
   DCHECK(current_pic_);
-  if (current_pic_->type != media::H264SliceHeader::kISlice)
+  if (current_pic_->type != H264SliceHeader::kISlice)
     return true;
 
   // Submit PPS.
@@ -520,7 +519,7 @@
 }
 
 bool VaapiVideoEncodeAccelerator::UploadFrame(
-    const scoped_refptr<media::VideoFrame>& frame) {
+    const scoped_refptr<VideoFrame>& frame) {
   return vaapi_wrapper_->UploadVideoFrameToSurface(
       frame, current_encode_job_->input_surface->id());
 }
@@ -560,9 +559,8 @@
                  encode_job->keyframe, encode_job->timestamp));
 }
 
-void VaapiVideoEncodeAccelerator::Encode(
-    const scoped_refptr<media::VideoFrame>& frame,
-    bool force_keyframe) {
+void VaapiVideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
+                                         bool force_keyframe) {
   DVLOGF(3) << "Frame timestamp: " << frame->timestamp().InMilliseconds()
             << " force_keyframe: " << force_keyframe;
   DCHECK(child_task_runner_->BelongsToCurrentThread());
@@ -606,7 +604,7 @@
 }
 
 void VaapiVideoEncodeAccelerator::EncodeTask(
-    const scoped_refptr<media::VideoFrame>& frame,
+    const scoped_refptr<VideoFrame>& frame,
     bool force_keyframe) {
   DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_NE(state_, kUninitialized);
@@ -658,7 +656,7 @@
 }
 
 void VaapiVideoEncodeAccelerator::UseOutputBitstreamBuffer(
-    const media::BitstreamBuffer& buffer) {
+    const BitstreamBuffer& buffer) {
   DVLOGF(4) << "id: " << buffer.id();
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
@@ -770,24 +768,24 @@
 }
 
 void VaapiVideoEncodeAccelerator::UpdateSPS() {
-  memset(&current_sps_, 0, sizeof(media::H264SPS));
+  memset(&current_sps_, 0, sizeof(H264SPS));
 
   // Spec A.2 and A.3.
   switch (profile_) {
-    case media::H264PROFILE_BASELINE:
+    case H264PROFILE_BASELINE:
       // Due to crbug.com/345569, we don't distinguish between constrained
       // and non-constrained baseline profiles. Since many codecs can't do
       // non-constrained, and constrained is usually what we mean (and it's a
       // subset of non-constrained), default to it.
-      current_sps_.profile_idc = media::H264SPS::kProfileIDCBaseline;
+      current_sps_.profile_idc = H264SPS::kProfileIDCBaseline;
       current_sps_.constraint_set0_flag = true;
       break;
-    case media::H264PROFILE_MAIN:
-      current_sps_.profile_idc = media::H264SPS::kProfileIDCMain;
+    case H264PROFILE_MAIN:
+      current_sps_.profile_idc = H264SPS::kProfileIDCMain;
       current_sps_.constraint_set1_flag = true;
       break;
-    case media::H264PROFILE_HIGH:
-      current_sps_.profile_idc = media::H264SPS::kProfileIDCHigh;
+    case H264PROFILE_HIGH:
+      current_sps_.profile_idc = H264SPS::kProfileIDCHigh;
       break;
     default:
       NOTIMPLEMENTED();
@@ -842,28 +840,24 @@
   current_sps_.bit_rate_scale = kBitRateScale;
   current_sps_.cpb_size_scale = kCPBSizeScale;
   current_sps_.bit_rate_value_minus1[0] =
-      (bitrate_ >>
-       (kBitRateScale + media::H264SPS::kBitRateScaleConstantTerm)) -
-      1;
+      (bitrate_ >> (kBitRateScale + H264SPS::kBitRateScaleConstantTerm)) - 1;
   current_sps_.cpb_size_value_minus1[0] =
-      (cpb_size_ >>
-       (kCPBSizeScale + media::H264SPS::kCPBSizeScaleConstantTerm)) -
-      1;
+      (cpb_size_ >> (kCPBSizeScale + H264SPS::kCPBSizeScaleConstantTerm)) - 1;
   current_sps_.cbr_flag[0] = true;
   current_sps_.initial_cpb_removal_delay_length_minus_1 =
-      media::H264SPS::kDefaultInitialCPBRemovalDelayLength - 1;
+      H264SPS::kDefaultInitialCPBRemovalDelayLength - 1;
   current_sps_.cpb_removal_delay_length_minus1 =
-      media::H264SPS::kDefaultInitialCPBRemovalDelayLength - 1;
+      H264SPS::kDefaultInitialCPBRemovalDelayLength - 1;
   current_sps_.dpb_output_delay_length_minus1 =
-      media::H264SPS::kDefaultDPBOutputDelayLength - 1;
-  current_sps_.time_offset_length = media::H264SPS::kDefaultTimeOffsetLength;
+      H264SPS::kDefaultDPBOutputDelayLength - 1;
+  current_sps_.time_offset_length = H264SPS::kDefaultTimeOffsetLength;
   current_sps_.low_delay_hrd_flag = false;
 }
 
 void VaapiVideoEncodeAccelerator::GeneratePackedSPS() {
   packed_sps_.Reset();
 
-  packed_sps_.BeginNALU(media::H264NALU::kSPS, 3);
+  packed_sps_.BeginNALU(H264NALU::kSPS, 3);
 
   packed_sps_.AppendBits(8, current_sps_.profile_idc);
   packed_sps_.AppendBool(current_sps_.constraint_set0_flag);
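   // A worked example of the HRD bitrate fields from UpdateSPS() above,
   // assuming kBitRateScale == 0 and H264SPS::kBitRateScaleConstantTerm == 6
   // (the spec reconstructs BitRate as
   // (bit_rate_value_minus1 + 1) << (6 + bit_rate_scale)):
   //   bitrate_ = 4000000;  // 4 Mbps
   //   bit_rate_value_minus1[0] = (4000000 >> 6) - 1 = 62499
   //   reconstructed BitRate = (62499 + 1) << 6 = 4000000, exact here because
   //   the bitrate is a multiple of 64.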
@@ -876,7 +870,7 @@
   packed_sps_.AppendBits(8, current_sps_.level_idc);
   packed_sps_.AppendUE(current_sps_.seq_parameter_set_id);
 
-  if (current_sps_.profile_idc == media::H264SPS::kProfileIDCHigh) {
+  if (current_sps_.profile_idc == H264SPS::kProfileIDCHigh) {
     packed_sps_.AppendUE(current_sps_.chroma_format_idc);
     if (current_sps_.chroma_format_idc == 3)
       packed_sps_.AppendBool(current_sps_.separate_colour_plane_flag);
@@ -975,13 +969,13 @@
 }
 
 void VaapiVideoEncodeAccelerator::UpdatePPS() {
-  memset(&current_pps_, 0, sizeof(media::H264PPS));
+  memset(&current_pps_, 0, sizeof(H264PPS));
 
   current_pps_.seq_parameter_set_id = current_sps_.seq_parameter_set_id;
   current_pps_.pic_parameter_set_id = 0;
 
   current_pps_.entropy_coding_mode_flag =
-      current_sps_.profile_idc >= media::H264SPS::kProfileIDCMain;
+      current_sps_.profile_idc >= H264SPS::kProfileIDCMain;
 
   CHECK_GT(max_ref_idx_l0_size_, 0u);
   current_pps_.num_ref_idx_l0_default_active_minus1 = max_ref_idx_l0_size_ - 1;
@@ -990,13 +984,13 @@
   current_pps_.pic_init_qp_minus26 = qp_ - 26;
   current_pps_.deblocking_filter_control_present_flag = true;
   current_pps_.transform_8x8_mode_flag =
-      (current_sps_.profile_idc == media::H264SPS::kProfileIDCHigh);
+      (current_sps_.profile_idc == H264SPS::kProfileIDCHigh);
 }
 
 void VaapiVideoEncodeAccelerator::GeneratePackedPPS() {
   packed_pps_.Reset();
 
-  packed_pps_.BeginNALU(media::H264NALU::kPPS, 3);
+  packed_pps_.BeginNALU(H264NALU::kPPS, 3);
 
   packed_pps_.AppendUE(current_pps_.pic_parameter_set_id);
   packed_pps_.AppendUE(current_pps_.seq_parameter_set_id);
diff --git a/media/gpu/vaapi_video_encode_accelerator.h b/media/gpu/vaapi_video_encode_accelerator.h
index e1c8d80..8b7d29ae 100644
--- a/media/gpu/vaapi_video_encode_accelerator.h
+++ b/media/gpu/vaapi_video_encode_accelerator.h
@@ -28,22 +28,21 @@
 // (http://www.freedesktop.org/wiki/Software/vaapi) for HW-accelerated
 // video encode.
 class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
-    : public media::VideoEncodeAccelerator {
+    : public VideoEncodeAccelerator {
  public:
   VaapiVideoEncodeAccelerator();
   ~VaapiVideoEncodeAccelerator() override;
 
-  // media::VideoEncodeAccelerator implementation.
-  media::VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles()
-      override;
-  bool Initialize(media::VideoPixelFormat format,
+  // VideoEncodeAccelerator implementation.
+  VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
+  bool Initialize(VideoPixelFormat format,
                   const gfx::Size& input_visible_size,
-                  media::VideoCodecProfile output_profile,
+                  VideoCodecProfile output_profile,
                   uint32_t initial_bitrate,
                   Client* client) override;
-  void Encode(const scoped_refptr<media::VideoFrame>& frame,
+  void Encode(const scoped_refptr<VideoFrame>& frame,
               bool force_keyframe) override;
-  void UseOutputBitstreamBuffer(const media::BitstreamBuffer& buffer) override;
+  void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
   void RequestEncodingParametersChange(uint32_t bitrate,
                                        uint32_t framerate) override;
   void Destroy() override;
@@ -95,8 +94,7 @@
   // Tasks for each of the VEA interface calls to be executed on the
   // encoder thread.
   void InitializeTask();
-  void EncodeTask(const scoped_refptr<media::VideoFrame>& frame,
-                  bool force_keyframe);
+  void EncodeTask(const scoped_refptr<VideoFrame>& frame, bool force_keyframe);
   void UseOutputBitstreamBufferTask(
       std::unique_ptr<BitstreamBufferRef> buffer_ref);
   void RequestEncodingParametersChangeTask(uint32_t bitrate,
@@ -136,7 +134,7 @@
   bool SubmitHeadersIfNeeded();
 
   // Upload image data from |frame| to the input surface for current job.
-  bool UploadFrame(const scoped_refptr<media::VideoFrame>& frame);
+  bool UploadFrame(const scoped_refptr<VideoFrame>& frame);
 
   // Execute encode in hardware. This does not block and will return before
   // the job is finished.
@@ -162,7 +160,7 @@
   scoped_refptr<VaapiWrapper> vaapi_wrapper_;
 
   // Input profile and sizes.
-  media::VideoCodecProfile profile_;
+  VideoCodecProfile profile_;
   gfx::Size visible_size_;
   gfx::Size coded_size_;  // Macroblock-aligned.
   // Width/height in macroblocks.
@@ -213,10 +211,10 @@
   // Current SPS, PPS and their packed versions. Packed versions are their NALUs
   // in AnnexB format *without* emulation prevention three-byte sequences
   // (those will be added by the driver).
-  media::H264SPS current_sps_;
-  media::H264BitstreamBuffer packed_sps_;
-  media::H264PPS current_pps_;
-  media::H264BitstreamBuffer packed_pps_;
+  H264SPS current_sps_;
+  H264BitstreamBuffer packed_sps_;
+  H264PPS current_pps_;
+  H264BitstreamBuffer packed_pps_;
 
   // Picture currently being prepared for encode.
   scoped_refptr<H264Picture> current_pic_;
diff --git a/media/gpu/vaapi_wrapper.cc b/media/gpu/vaapi_wrapper.cc
index 755a74a3..fa4a1c8 100644
--- a/media/gpu/vaapi_wrapper.cc
+++ b/media/gpu/vaapi_wrapper.cc
@@ -118,21 +118,21 @@
 };
 
 struct ProfileMap {
-  media::VideoCodecProfile profile;
+  VideoCodecProfile profile;
   VAProfile va_profile;
 };
 
 // A map between VideoCodecProfile and VAProfile.
 static const ProfileMap kProfileMap[] = {
-    {media::H264PROFILE_BASELINE, VAProfileH264Baseline},
-    {media::H264PROFILE_MAIN, VAProfileH264Main},
+    {H264PROFILE_BASELINE, VAProfileH264Baseline},
+    {H264PROFILE_MAIN, VAProfileH264Main},
     // TODO(posciak): See if we can/want to support other variants of
-    // media::H264PROFILE_HIGH*.
-    {media::H264PROFILE_HIGH, VAProfileH264High},
-    {media::VP8PROFILE_ANY, VAProfileVP8Version0_3},
+    // H264PROFILE_HIGH*.
+    {H264PROFILE_HIGH, VAProfileH264High},
+    {VP8PROFILE_ANY, VAProfileVP8Version0_3},
     // TODO(servolk): Need to add VP9 profiles 1,2,3 here after rolling
     // third_party/libva to 1.7. crbug.com/598118
-    {media::VP9PROFILE_PROFILE0, VAProfileVP9Profile0},
+    {VP9PROFILE_PROFILE0, VAProfileVP9Profile0},
 };
 
 static std::vector<VAConfigAttrib> GetRequiredAttribs(
@@ -205,7 +205,7 @@
 // static
 scoped_refptr<VaapiWrapper> VaapiWrapper::CreateForVideoCodec(
     CodecMode mode,
-    media::VideoCodecProfile profile,
+    VideoCodecProfile profile,
     const base::Closure& report_error_to_uma_cb) {
   VAProfile va_profile = ProfileToVAProfile(profile, mode);
   scoped_refptr<VaapiWrapper> vaapi_wrapper =
@@ -214,9 +214,9 @@
 }
 
 // static
-media::VideoEncodeAccelerator::SupportedProfiles
+VideoEncodeAccelerator::SupportedProfiles
 VaapiWrapper::GetSupportedEncodeProfiles() {
-  media::VideoEncodeAccelerator::SupportedProfiles profiles;
+  VideoEncodeAccelerator::SupportedProfiles profiles;
   std::vector<ProfileInfo> encode_profile_infos =
       profile_infos_.Get().GetSupportedProfileInfosForCodecMode(kEncode);
 
@@ -226,7 +226,7 @@
       continue;
     for (const auto& profile_info : encode_profile_infos) {
       if (profile_info.va_profile == va_profile) {
-        media::VideoEncodeAccelerator::SupportedProfile profile;
+        VideoEncodeAccelerator::SupportedProfile profile;
         profile.profile = kProfileMap[i].profile;
         profile.max_resolution = profile_info.max_resolution;
         profile.max_framerate_numerator = kMaxEncoderFramerate;
@@ -240,9 +240,9 @@
 }
 
 // static
-media::VideoDecodeAccelerator::SupportedProfiles
+VideoDecodeAccelerator::SupportedProfiles
 VaapiWrapper::GetSupportedDecodeProfiles() {
-  media::VideoDecodeAccelerator::SupportedProfiles profiles;
+  VideoDecodeAccelerator::SupportedProfiles profiles;
   std::vector<ProfileInfo> decode_profile_infos =
       profile_infos_.Get().GetSupportedProfileInfosForCodecMode(kDecode);
 
@@ -252,7 +252,7 @@
       continue;
     for (const auto& profile_info : decode_profile_infos) {
       if (profile_info.va_profile == va_profile) {
-        media::VideoDecodeAccelerator::SupportedProfile profile;
+        VideoDecodeAccelerator::SupportedProfile profile;
         profile.profile = kProfileMap[i].profile;
         profile.max_resolution = profile_info.max_resolution;
         profile.min_resolution.SetSize(16, 16);
@@ -284,7 +284,7 @@
 }
 
 // static
-VAProfile VaapiWrapper::ProfileToVAProfile(media::VideoCodecProfile profile,
+VAProfile VaapiWrapper::ProfileToVAProfile(VideoCodecProfile profile,
                                            CodecMode mode) {
   VAProfile va_profile = VAProfileNone;
   for (size_t i = 0; i < arraysize(kProfileMap); ++i) {
@@ -295,7 +295,7 @@
   }
   if (!profile_infos_.Get().IsProfileSupported(mode, va_profile) &&
       va_profile == VAProfileH264Baseline) {
-    // crbug.com/345569: media::ProfileIDToVideoCodecProfile() currently strips
+    // crbug.com/345569: ProfileIDToVideoCodecProfile() currently strips
     // the information whether the profile is constrained or not, so we have no
     // way to know here. Try for baseline first, but if it is not supported,
     // try constrained baseline and hope this is what it actually is
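     // A minimal sketch of that fallback, assuming the libva enum
     // VAProfileH264ConstrainedBaseline (the retry itself sits just past this
     // hunk):
     //   va_profile = VAProfileH264ConstrainedBaseline;
     //   if (!profile_infos_.Get().IsProfileSupported(mode, va_profile))
     //     va_profile = VAProfileNone;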
@@ -930,7 +930,7 @@
 }
 
 bool VaapiWrapper::UploadVideoFrameToSurface(
-    const scoped_refptr<media::VideoFrame>& frame,
+    const scoped_refptr<VideoFrame>& frame,
     VASurfaceID va_surface_id) {
   base::AutoLock auto_lock(*va_lock_);
 
@@ -959,12 +959,9 @@
   {
     base::AutoUnlock auto_unlock(*va_lock_);
     ret = libyuv::I420ToNV12(
-        frame->data(media::VideoFrame::kYPlane),
-        frame->stride(media::VideoFrame::kYPlane),
-        frame->data(media::VideoFrame::kUPlane),
-        frame->stride(media::VideoFrame::kUPlane),
-        frame->data(media::VideoFrame::kVPlane),
-        frame->stride(media::VideoFrame::kVPlane),
+        frame->data(VideoFrame::kYPlane), frame->stride(VideoFrame::kYPlane),
+        frame->data(VideoFrame::kUPlane), frame->stride(VideoFrame::kUPlane),
+        frame->data(VideoFrame::kVPlane), frame->stride(VideoFrame::kVPlane),
         static_cast<uint8_t*>(image_ptr) + image.offsets[0], image.pitches[0],
         static_cast<uint8_t*>(image_ptr) + image.offsets[1], image.pitches[1],
         image.width, image.height);
diff --git a/media/gpu/vaapi_wrapper.h b/media/gpu/vaapi_wrapper.h
index e5f84a4..16efa2d 100644
--- a/media/gpu/vaapi_wrapper.h
+++ b/media/gpu/vaapi_wrapper.h
@@ -77,16 +77,14 @@
   // errors to clients via method return values.
   static scoped_refptr<VaapiWrapper> CreateForVideoCodec(
       CodecMode mode,
-      media::VideoCodecProfile profile,
+      VideoCodecProfile profile,
       const base::Closure& report_error_to_uma_cb);
 
   // Return the supported video encode profiles.
-  static media::VideoEncodeAccelerator::SupportedProfiles
-  GetSupportedEncodeProfiles();
+  static VideoEncodeAccelerator::SupportedProfiles GetSupportedEncodeProfiles();
 
   // Return the supported video decode profiles.
-  static media::VideoDecodeAccelerator::SupportedProfiles
-  GetSupportedDecodeProfiles();
+  static VideoDecodeAccelerator::SupportedProfiles GetSupportedDecodeProfiles();
 
   // Return true when JPEG decode is supported.
   static bool IsJpegDecodeSupported();
@@ -189,7 +187,7 @@
   void ReturnVaImage(VAImage* image);
 
   // Upload contents of |frame| into |va_surface_id| for encode.
-  bool UploadVideoFrameToSurface(const scoped_refptr<media::VideoFrame>& frame,
+  bool UploadVideoFrameToSurface(const scoped_refptr<VideoFrame>& frame,
                                  VASurfaceID va_surface_id);
 
   // Create a buffer of |size| bytes to be used as encode output.
@@ -344,7 +342,7 @@
   // Map VideoCodecProfile enum values to VaProfile values. This function
   // includes a workaround for crbug.com/345569. If va_profile is h264 baseline
   // and it is not supported, we try constrained baseline.
-  static VAProfile ProfileToVAProfile(media::VideoCodecProfile profile,
+  static VAProfile ProfileToVAProfile(VideoCodecProfile profile,
                                       CodecMode mode);
 
   // Pointer to VADisplayState's member |va_lock_|. Guaranteed to be valid for
diff --git a/media/gpu/video_decode_accelerator_unittest.cc b/media/gpu/video_decode_accelerator_unittest.cc
index f0c75c99..81fede7 100644
--- a/media/gpu/video_decode_accelerator_unittest.cc
+++ b/media/gpu/video_decode_accelerator_unittest.cc
@@ -85,9 +85,8 @@
 #include "ui/ozone/public/surface_factory_ozone.h"
 #endif  // defined(USE_OZONE)
 
-using media::VideoDecodeAccelerator;
-
 namespace media {
+
 namespace {
 
 // Values optionally filled in from flags; see main() below.
@@ -105,7 +104,7 @@
 // - |minFPSwithRender| and |minFPSnoRender| are minimum frames/second speeds
 //   expected to be achieved with and without rendering to the screen, resp.
 //   (the latter tests just decode speed).
-// - |profile| is the media::VideoCodecProfile set during Initialization.
+// - |profile| is the VideoCodecProfile set during Initialization.
 // An empty value for a numeric field means "ignore".
 const base::FilePath::CharType* g_test_video_data =
     // FILE_PATH_LITERAL("test-25fps.vp8:320:240:250:250:50:175:11");
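 // A minimal sketch of how one such |entry| string would be split into its
 // colon-separated fields, assuming base::SplitString from
 // base/strings/string_split.h (the real parsing, which also converts
 // fields[7] to a VideoCodecProfile, lives further down in this file):
 //   std::vector<base::FilePath::StringType> fields = base::SplitString(
 //       entry, FILE_PATH_LITERAL(":"), base::TRIM_WHITESPACE,
 //       base::SPLIT_WANT_ALL);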
@@ -166,7 +165,7 @@
         num_fragments(-1),
         min_fps_render(-1),
         min_fps_no_render(-1),
-        profile(media::VIDEO_CODEC_PROFILE_UNKNOWN),
+        profile(VIDEO_CODEC_PROFILE_UNKNOWN),
         reset_after_frame_num(END_OF_STREAM_RESET) {}
 
   base::FilePath::StringType file_name;
@@ -176,7 +175,7 @@
   int num_fragments;
   int min_fps_render;
   int min_fps_no_render;
-  media::VideoCodecProfile profile;
+  VideoCodecProfile profile;
   int reset_after_frame_num;
   std::string data_str;
 };
@@ -288,7 +287,7 @@
   static scoped_refptr<TextureRef> CreatePreallocated(
       uint32_t texture_id,
       const base::Closure& no_longer_needed_cb,
-      media::VideoPixelFormat pixel_format,
+      VideoPixelFormat pixel_format,
       const gfx::Size& size);
 
   gfx::GpuMemoryBufferHandle ExportGpuMemoryBufferHandle() const;
@@ -323,13 +322,13 @@
 
 #if defined(USE_OZONE)
 gfx::BufferFormat VideoPixelFormatToGfxBufferFormat(
-    media::VideoPixelFormat pixel_format) {
+    VideoPixelFormat pixel_format) {
   switch (pixel_format) {
-    case media::VideoPixelFormat::PIXEL_FORMAT_ARGB:
+    case VideoPixelFormat::PIXEL_FORMAT_ARGB:
       return gfx::BufferFormat::BGRA_8888;
-    case media::VideoPixelFormat::PIXEL_FORMAT_XRGB:
+    case VideoPixelFormat::PIXEL_FORMAT_XRGB:
       return gfx::BufferFormat::BGRX_8888;
-    case media::VideoPixelFormat::PIXEL_FORMAT_NV12:
+    case VideoPixelFormat::PIXEL_FORMAT_NV12:
       return gfx::BufferFormat::YUV_420_BIPLANAR;
     default:
       LOG_ASSERT(false) << "Unknown VideoPixelFormat";
@@ -342,7 +341,7 @@
 scoped_refptr<TextureRef> TextureRef::CreatePreallocated(
     uint32_t texture_id,
     const base::Closure& no_longer_needed_cb,
-    media::VideoPixelFormat pixel_format,
+    VideoPixelFormat pixel_format,
     const gfx::Size& size) {
   scoped_refptr<TextureRef> texture_ref;
 #if defined(USE_OZONE)
@@ -411,7 +410,7 @@
                        int delete_decoder_state,
                        int frame_width,
                        int frame_height,
-                       media::VideoCodecProfile profile,
+                       VideoCodecProfile profile,
                        int fake_decoder,
                        bool suppress_rendering,
                        int delay_reuse_after_frame_num,
@@ -428,7 +427,7 @@
                              const gfx::Size& dimensions,
                              uint32_t texture_target) override;
   void DismissPictureBuffer(int32_t picture_buffer_id) override;
-  void PictureReady(const media::Picture& picture) override;
+  void PictureReady(const Picture& picture) override;
   // Simple state changes.
   void NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) override;
   void NotifyFlushDone() override;
@@ -495,7 +494,7 @@
   int num_decoded_frames_;
   int num_done_bitstream_buffers_;
   base::TimeTicks initialize_done_ticks_;
-  media::VideoCodecProfile profile_;
+  VideoCodecProfile profile_;
   int fake_decoder_;
   GLenum texture_target_;
   VideoPixelFormat pixel_format_;
@@ -552,7 +551,7 @@
     int delete_decoder_state,
     int frame_width,
     int frame_height,
-    media::VideoCodecProfile profile,
+    VideoCodecProfile profile,
     int fake_decoder,
     bool suppress_rendering,
     int delay_reuse_after_frame_num,
@@ -591,9 +590,8 @@
     LOG_ASSERT(1 == num_in_flight_decodes_);
 
   // Default to H264 baseline if no profile provided.
-  profile_ = (profile != media::VIDEO_CODEC_PROFILE_UNKNOWN
-                  ? profile
-                  : media::H264PROFILE_BASELINE);
+  profile_ =
+      (profile != VIDEO_CODEC_PROFILE_UNKNOWN ? profile : H264PROFILE_BASELINE);
 
   weak_this_ = weak_this_factory_.GetWeakPtr();
 }
@@ -623,8 +621,7 @@
 
     VideoDecodeAccelerator::Config config(profile_);
     if (g_test_import) {
-      config.output_mode =
-          media::VideoDecodeAccelerator::Config::OutputMode::IMPORT;
+      config.output_mode = VideoDecodeAccelerator::Config::OutputMode::IMPORT;
     }
     gpu::GpuPreferences gpu_preferences;
     decoder_ = vda_factory_->CreateVDA(this, config, gpu_preferences);
@@ -652,11 +649,11 @@
   if (decoder_deleted())
     return;
   LOG_ASSERT(textures_per_buffer == 1u);
-  std::vector<media::PictureBuffer> buffers;
+  std::vector<PictureBuffer> buffers;
 
   requested_num_of_buffers += kExtraPictureBuffers;
-  if (pixel_format == media::PIXEL_FORMAT_UNKNOWN)
-    pixel_format = media::PIXEL_FORMAT_ARGB;
+  if (pixel_format == PIXEL_FORMAT_UNKNOWN)
+    pixel_format = PIXEL_FORMAT_ARGB;
 
   LOG_ASSERT((pixel_format_ == PIXEL_FORMAT_UNKNOWN) ||
              (pixel_format_ == pixel_format));
@@ -690,9 +687,9 @@
         active_textures_.insert(std::make_pair(picture_buffer_id, texture_ref))
             .second);
 
-    media::PictureBuffer::TextureIds ids;
+    PictureBuffer::TextureIds ids;
     ids.push_back(texture_id);
-    buffers.push_back(media::PictureBuffer(picture_buffer_id, dimensions, ids));
+    buffers.push_back(PictureBuffer(picture_buffer_id, dimensions, ids));
   }
   decoder_->AssignPictureBuffers(buffers);
 
@@ -713,7 +710,7 @@
   LOG_ASSERT(1U == active_textures_.erase(picture_buffer_id));
 }
 
-void GLRenderingVDAClient::PictureReady(const media::Picture& picture) {
+void GLRenderingVDAClient::PictureReady(const Picture& picture) {
   // We shouldn't be getting pictures delivered after Reset has completed.
   LOG_ASSERT(state_ < CS_RESET);
 
@@ -910,7 +907,7 @@
 
 std::string GLRenderingVDAClient::GetBytesForFirstFragment(size_t start_pos,
                                                            size_t* end_pos) {
-  if (profile_ < media::H264PROFILE_MAX) {
+  if (profile_ < H264PROFILE_MAX) {
     *end_pos = start_pos;
     while (*end_pos + 4 < encoded_data_.size()) {
       if ((encoded_data_[*end_pos + 4] & 0x1f) == 0x7)  // SPS NALU
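       // The check assumes a 4-byte Annex-B start code (00 00 00 01): the
       // byte after it is the NAL header, whose low 5 bits are nal_unit_type,
       // and type 7 is an SPS (H.264 section 7.4.1). E.g. for the bytes
       // 00 00 00 01 67 64 ..., 0x67 & 0x1f == 0x07.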
@@ -921,13 +918,13 @@
     *end_pos = start_pos;
     return std::string();
   }
-  DCHECK_LE(profile_, media::VP9PROFILE_MAX);
+  DCHECK_LE(profile_, VP9PROFILE_MAX);
   return GetBytesForNextFragment(start_pos, end_pos);
 }
 
 std::string GLRenderingVDAClient::GetBytesForNextFragment(size_t start_pos,
                                                           size_t* end_pos) {
-  if (profile_ < media::H264PROFILE_MAX) {
+  if (profile_ < H264PROFILE_MAX) {
     *end_pos = start_pos;
     GetBytesForNextNALU(*end_pos, end_pos);
     if (start_pos != *end_pos) {
@@ -935,7 +932,7 @@
     }
     return encoded_data_.substr(start_pos, *end_pos - start_pos);
   }
-  DCHECK_LE(profile_, media::VP9PROFILE_MAX);
+  DCHECK_LE(profile_, VP9PROFILE_MAX);
   return GetBytesForNextFrame(start_pos, end_pos);
 }
 
@@ -971,20 +968,19 @@
 
 static bool FragmentHasConfigInfo(const uint8_t* data,
                                   size_t size,
-                                  media::VideoCodecProfile profile) {
-  if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) {
-    media::H264Parser parser;
+                                  VideoCodecProfile profile) {
+  if (profile >= H264PROFILE_MIN && profile <= H264PROFILE_MAX) {
+    H264Parser parser;
     parser.SetStream(data, size);
-    media::H264NALU nalu;
-    media::H264Parser::Result result = parser.AdvanceToNextNALU(&nalu);
-    if (result != media::H264Parser::kOk) {
+    H264NALU nalu;
+    H264Parser::Result result = parser.AdvanceToNextNALU(&nalu);
+    if (result != H264Parser::kOk) {
       // Let the VDA figure out there's something wrong with the stream.
       return false;
     }
 
-    return nalu.nal_unit_type == media::H264NALU::kSPS;
-  } else if (profile >= media::VP8PROFILE_MIN &&
-             profile <= media::VP9PROFILE_MAX) {
+    return nalu.nal_unit_type == H264NALU::kSPS;
+  } else if (profile >= VP8PROFILE_MIN && profile <= VP9PROFILE_MAX) {
     return (size > 0 && !(data[0] & 0x01));
   }
   // Shouldn't happen at this point.
@@ -1028,8 +1024,8 @@
   bool result =
       shm.ShareToProcess(base::GetCurrentProcessHandle(), &dup_handle);
   LOG_ASSERT(result);
-  media::BitstreamBuffer bitstream_buffer(next_bitstream_buffer_id_, dup_handle,
-                                          next_fragment_size);
+  BitstreamBuffer bitstream_buffer(next_bitstream_buffer_id_, dup_handle,
+                                   next_fragment_size);
   decode_start_time_[next_bitstream_buffer_id_] = base::TimeTicks::Now();
   // Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
   next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF;
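   // A short illustration of the mask above: the id is a signed 32-bit value,
   // and overflowing it with ++ would be undefined behavior, so the increment
   // wraps explicitly within [0, 2^30):
   //   0x3FFFFFFF + 1 masks to 0, so the id never approaches INT32_MAX.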
@@ -1160,7 +1156,7 @@
     int profile = -1;
     if (!fields[7].empty())
       LOG_ASSERT(base::StringToInt(fields[7], &profile));
-    video_file->profile = static_cast<media::VideoCodecProfile>(profile);
+    video_file->profile = static_cast<VideoCodecProfile>(profile);
 
     // Read in the video data.
     base::FilePath filepath(video_file->file_name);
diff --git a/media/gpu/video_encode_accelerator_unittest.cc b/media/gpu/video_encode_accelerator_unittest.cc
index 3cb9aba..528b24f 100644
--- a/media/gpu/video_encode_accelerator_unittest.cc
+++ b/media/gpu/video_encode_accelerator_unittest.cc
@@ -63,12 +63,10 @@
 #error The VideoEncodeAcceleratorUnittest is not supported on this platform.
 #endif
 
-using media::VideoEncodeAccelerator;
-
 namespace media {
 namespace {
 
-const media::VideoPixelFormat kInputFormat = media::PIXEL_FORMAT_I420;
+const VideoPixelFormat kInputFormat = PIXEL_FORMAT_I420;
 
 // The absolute differences between original frame and decoded frame usually
 // range around 1 ~ 7. So we pick 10 as an extreme value to detect abnormal
@@ -113,7 +111,7 @@
 // - |in_filename| must be an I420 (YUV planar) raw stream
 //   (see http://www.fourcc.org/yuv.php#IYUV).
 // - |width| and |height| are in pixels.
-// - |profile| to encode into (values of media::VideoCodecProfile).
+// - |profile| to encode into (values of VideoCodecProfile).
 // - |out_filename| filename to save the encoded stream to (optional). The
 //   format for H264 is Annex-B byte stream. The format for VP8 is IVF. Output
 //   stream is saved for the simple encode test only. H264 raw stream and IVF
@@ -177,7 +175,7 @@
   std::vector<size_t> aligned_plane_size;
 
   std::string out_filename;
-  media::VideoCodecProfile requested_profile;
+  VideoCodecProfile requested_profile;
   unsigned int requested_bitrate;
   unsigned int requested_framerate;
   unsigned int requested_subsequent_bitrate;
@@ -201,12 +199,12 @@
   return sorted_values[index];
 }
 
-static bool IsH264(media::VideoCodecProfile profile) {
-  return profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX;
+static bool IsH264(VideoCodecProfile profile) {
+  return profile >= H264PROFILE_MIN && profile <= H264PROFILE_MAX;
 }
 
-static bool IsVP8(media::VideoCodecProfile profile) {
-  return profile >= media::VP8PROFILE_MIN && profile <= media::VP8PROFILE_MAX;
+static bool IsVP8(VideoCodecProfile profile) {
+  return profile >= VP8PROFILE_MIN && profile <= VP8PROFILE_MAX;
 }
 
 // ARM performs CPU cache management with CPU cache line granularity. We thus
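 // Align64Bytes() is defined outside this hunk; a minimal sketch of the
 // round-up-to-64 helper it is assumed to be (64 bytes covering the cache
 // line granularity mentioned above):
 //   static size_t Align64Bytes(size_t size) {
 //     return (size + 63) & ~size_t{63};
 //   }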
@@ -230,7 +228,7 @@
               coded_size == test_stream->coded_size);
   test_stream->coded_size = coded_size;
 
-  size_t num_planes = media::VideoFrame::NumPlanes(kInputFormat);
+  size_t num_planes = VideoFrame::NumPlanes(kInputFormat);
   std::vector<size_t> padding_sizes(num_planes);
   std::vector<size_t> coded_bpl(num_planes);
   std::vector<size_t> visible_bpl(num_planes);
@@ -244,18 +242,17 @@
   // copied into a row of coded_bpl bytes in the aligned file.
   for (size_t i = 0; i < num_planes; i++) {
     const size_t size =
-        media::VideoFrame::PlaneSize(kInputFormat, i, coded_size).GetArea();
+        VideoFrame::PlaneSize(kInputFormat, i, coded_size).GetArea();
     test_stream->aligned_plane_size.push_back(Align64Bytes(size));
     test_stream->aligned_buffer_size += test_stream->aligned_plane_size.back();
 
-    coded_bpl[i] =
-        media::VideoFrame::RowBytes(i, kInputFormat, coded_size.width());
-    visible_bpl[i] = media::VideoFrame::RowBytes(
-        i, kInputFormat, test_stream->visible_size.width());
-    visible_plane_rows[i] = media::VideoFrame::Rows(
-        i, kInputFormat, test_stream->visible_size.height());
+    coded_bpl[i] = VideoFrame::RowBytes(i, kInputFormat, coded_size.width());
+    visible_bpl[i] = VideoFrame::RowBytes(i, kInputFormat,
+                                          test_stream->visible_size.width());
+    visible_plane_rows[i] =
+        VideoFrame::Rows(i, kInputFormat, test_stream->visible_size.height());
     const size_t padding_rows =
-        media::VideoFrame::Rows(i, kInputFormat, coded_size.height()) -
+        VideoFrame::Rows(i, kInputFormat, coded_size.height()) -
         visible_plane_rows[i];
     padding_sizes[i] = padding_rows * coded_bpl[i] + Align64Bytes(size) - size;
   }
@@ -264,8 +261,8 @@
   int64_t src_file_size = 0;
   LOG_ASSERT(base::GetFileSize(src_file, &src_file_size));
 
-  size_t visible_buffer_size = media::VideoFrame::AllocationSize(
-      kInputFormat, test_stream->visible_size);
+  size_t visible_buffer_size =
+      VideoFrame::AllocationSize(kInputFormat, test_stream->visible_size);
   LOG_ASSERT(src_file_size % visible_buffer_size == 0U)
       << "Stream byte size is not a product of calculated frame byte size";
 
@@ -337,10 +334,9 @@
     int profile;
     result = base::StringToInt(fields[3], &profile);
     LOG_ASSERT(result);
-    LOG_ASSERT(profile > media::VIDEO_CODEC_PROFILE_UNKNOWN);
-    LOG_ASSERT(profile <= media::VIDEO_CODEC_PROFILE_MAX);
-    test_stream->requested_profile =
-        static_cast<media::VideoCodecProfile>(profile);
+    LOG_ASSERT(profile > VIDEO_CODEC_PROFILE_UNKNOWN);
+    LOG_ASSERT(profile <= VIDEO_CODEC_PROFILE_MAX);
+    test_stream->requested_profile = static_cast<VideoCodecProfile>(profile);
 
     if (fields.size() >= 5 && !fields[4].empty())
       test_stream->out_filename = fields[4];
@@ -463,7 +459,7 @@
 
   // Provide a StreamValidator instance for the given |profile|.
   static std::unique_ptr<StreamValidator> Create(
-      media::VideoCodecProfile profile,
+      VideoCodecProfile profile,
       const FrameFoundCallback& frame_cb);
 
   // Process and verify contents of a bitstream buffer.
@@ -492,49 +488,49 @@
   bool seen_pps_;
   bool seen_idr_;
 
-  media::H264Parser h264_parser_;
+  H264Parser h264_parser_;
 };
 
 void H264Validator::ProcessStreamBuffer(const uint8_t* stream, size_t size) {
   h264_parser_.SetStream(stream, size);
 
   while (1) {
-    media::H264NALU nalu;
-    media::H264Parser::Result result;
+    H264NALU nalu;
+    H264Parser::Result result;
 
     result = h264_parser_.AdvanceToNextNALU(&nalu);
-    if (result == media::H264Parser::kEOStream)
+    if (result == H264Parser::kEOStream)
       break;
 
-    ASSERT_EQ(media::H264Parser::kOk, result);
+    ASSERT_EQ(H264Parser::kOk, result);
 
     bool keyframe = false;
 
     switch (nalu.nal_unit_type) {
-      case media::H264NALU::kIDRSlice:
+      case H264NALU::kIDRSlice:
         ASSERT_TRUE(seen_sps_);
         ASSERT_TRUE(seen_pps_);
         seen_idr_ = true;
         keyframe = true;
       // fallthrough
-      case media::H264NALU::kNonIDRSlice: {
+      case H264NALU::kNonIDRSlice: {
         ASSERT_TRUE(seen_idr_);
         if (!frame_cb_.Run(keyframe))
           return;
         break;
       }
 
-      case media::H264NALU::kSPS: {
+      case H264NALU::kSPS: {
         int sps_id;
-        ASSERT_EQ(media::H264Parser::kOk, h264_parser_.ParseSPS(&sps_id));
+        ASSERT_EQ(H264Parser::kOk, h264_parser_.ParseSPS(&sps_id));
         seen_sps_ = true;
         break;
       }
 
-      case media::H264NALU::kPPS: {
+      case H264NALU::kPPS: {
         ASSERT_TRUE(seen_sps_);
         int pps_id;
-        ASSERT_EQ(media::H264Parser::kOk, h264_parser_.ParsePPS(&pps_id));
+        ASSERT_EQ(H264Parser::kOk, h264_parser_.ParsePPS(&pps_id));
         seen_pps_ = true;
         break;
       }
@@ -572,7 +568,7 @@
 
 // static
 std::unique_ptr<StreamValidator> StreamValidator::Create(
-    media::VideoCodecProfile profile,
+    VideoCodecProfile profile,
     const FrameFoundCallback& frame_cb) {
   std::unique_ptr<StreamValidator> validator;
 
@@ -589,44 +585,44 @@
 
 class VideoFrameQualityValidator {
  public:
-  VideoFrameQualityValidator(const media::VideoCodecProfile profile,
+  VideoFrameQualityValidator(const VideoCodecProfile profile,
                              const base::Closure& flush_complete_cb,
                              const base::Closure& decode_error_cb);
   void Initialize(const gfx::Size& coded_size, const gfx::Rect& visible_size);
   // Save original YUV frame to compare it with the decoded frame later.
-  void AddOriginalFrame(scoped_refptr<media::VideoFrame> frame);
-  void AddDecodeBuffer(const scoped_refptr<media::DecoderBuffer>& buffer);
+  void AddOriginalFrame(scoped_refptr<VideoFrame> frame);
+  void AddDecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer);
   // Flush the decoder.
   void Flush();
 
  private:
   void InitializeCB(bool success);
-  void DecodeDone(media::DecodeStatus status);
-  void FlushDone(media::DecodeStatus status);
-  void VerifyOutputFrame(const scoped_refptr<media::VideoFrame>& output_frame);
+  void DecodeDone(DecodeStatus status);
+  void FlushDone(DecodeStatus status);
+  void VerifyOutputFrame(const scoped_refptr<VideoFrame>& output_frame);
   void Decode();
 
   enum State { UNINITIALIZED, INITIALIZED, DECODING, ERROR };
 
-  const media::VideoCodecProfile profile_;
-  std::unique_ptr<media::FFmpegVideoDecoder> decoder_;
-  media::VideoDecoder::DecodeCB decode_cb_;
+  const VideoCodecProfile profile_;
+  std::unique_ptr<FFmpegVideoDecoder> decoder_;
+  VideoDecoder::DecodeCB decode_cb_;
   // Decode callback of an EOS buffer.
-  media::VideoDecoder::DecodeCB eos_decode_cb_;
+  VideoDecoder::DecodeCB eos_decode_cb_;
   // Callback of Flush(). Called after all frames are decoded.
   const base::Closure flush_complete_cb_;
   const base::Closure decode_error_cb_;
   State decoder_state_;
-  std::queue<scoped_refptr<media::VideoFrame>> original_frames_;
-  std::queue<scoped_refptr<media::DecoderBuffer>> decode_buffers_;
+  std::queue<scoped_refptr<VideoFrame>> original_frames_;
+  std::queue<scoped_refptr<DecoderBuffer>> decode_buffers_;
 };
 
 VideoFrameQualityValidator::VideoFrameQualityValidator(
-    const media::VideoCodecProfile profile,
+    const VideoCodecProfile profile,
     const base::Closure& flush_complete_cb,
     const base::Closure& decode_error_cb)
     : profile_(profile),
-      decoder_(new media::FFmpegVideoDecoder()),
+      decoder_(new FFmpegVideoDecoder()),
       decode_cb_(base::Bind(&VideoFrameQualityValidator::DecodeDone,
                             base::Unretained(this))),
       eos_decode_cb_(base::Bind(&VideoFrameQualityValidator::FlushDone,
@@ -640,21 +636,19 @@
 
 void VideoFrameQualityValidator::Initialize(const gfx::Size& coded_size,
                                             const gfx::Rect& visible_size) {
-  media::FFmpegGlue::InitializeFFmpeg();
+  FFmpegGlue::InitializeFFmpeg();
 
   gfx::Size natural_size(visible_size.size());
   // The default output format of ffmpeg video decoder is YV12.
-  media::VideoDecoderConfig config;
+  VideoDecoderConfig config;
   if (IsVP8(profile_))
-    config.Initialize(media::kCodecVP8, media::VP8PROFILE_ANY, kInputFormat,
-                      media::COLOR_SPACE_UNSPECIFIED, coded_size, visible_size,
-                      natural_size, media::EmptyExtraData(),
-                      media::Unencrypted());
+    config.Initialize(kCodecVP8, VP8PROFILE_ANY, kInputFormat,
+                      COLOR_SPACE_UNSPECIFIED, coded_size, visible_size,
+                      natural_size, EmptyExtraData(), Unencrypted());
   else if (IsH264(profile_))
-    config.Initialize(media::kCodecH264, media::H264PROFILE_MAIN, kInputFormat,
-                      media::COLOR_SPACE_UNSPECIFIED, coded_size, visible_size,
-                      natural_size, media::EmptyExtraData(),
-                      media::Unencrypted());
+    config.Initialize(kCodecH264, H264PROFILE_MAIN, kInputFormat,
+                      COLOR_SPACE_UNSPECIFIED, coded_size, visible_size,
+                      natural_size, EmptyExtraData(), Unencrypted());
   else
     LOG_ASSERT(0) << "Invalid profile " << profile_;
 
@@ -680,12 +674,12 @@
 }
 
 void VideoFrameQualityValidator::AddOriginalFrame(
-    scoped_refptr<media::VideoFrame> frame) {
+    scoped_refptr<VideoFrame> frame) {
   original_frames_.push(frame);
 }
 
-void VideoFrameQualityValidator::DecodeDone(media::DecodeStatus status) {
-  if (status == media::DecodeStatus::OK) {
+void VideoFrameQualityValidator::DecodeDone(DecodeStatus status) {
+  if (status == DecodeStatus::OK) {
     decoder_state_ = INITIALIZED;
     Decode();
   } else {
@@ -695,19 +689,19 @@
   }
 }
 
-void VideoFrameQualityValidator::FlushDone(media::DecodeStatus status) {
+void VideoFrameQualityValidator::FlushDone(DecodeStatus status) {
   flush_complete_cb_.Run();
 }
 
 void VideoFrameQualityValidator::Flush() {
   if (decoder_state_ != ERROR) {
-    decode_buffers_.push(media::DecoderBuffer::CreateEOSBuffer());
+    decode_buffers_.push(DecoderBuffer::CreateEOSBuffer());
     Decode();
   }
 }
 
 void VideoFrameQualityValidator::AddDecodeBuffer(
-    const scoped_refptr<media::DecoderBuffer>& buffer) {
+    const scoped_refptr<DecoderBuffer>& buffer) {
   if (decoder_state_ != ERROR) {
     decode_buffers_.push(buffer);
     Decode();
@@ -716,7 +710,7 @@
 
 void VideoFrameQualityValidator::Decode() {
   if (decoder_state_ == INITIALIZED && !decode_buffers_.empty()) {
-    scoped_refptr<media::DecoderBuffer> next_buffer = decode_buffers_.front();
+    scoped_refptr<DecoderBuffer> next_buffer = decode_buffers_.front();
     decode_buffers_.pop();
     decoder_state_ = DECODING;
     if (next_buffer->end_of_stream())
@@ -727,22 +721,21 @@
 }
 
 void VideoFrameQualityValidator::VerifyOutputFrame(
-    const scoped_refptr<media::VideoFrame>& output_frame) {
-  scoped_refptr<media::VideoFrame> original_frame = original_frames_.front();
+    const scoped_refptr<VideoFrame>& output_frame) {
+  scoped_refptr<VideoFrame> original_frame = original_frames_.front();
   original_frames_.pop();
   gfx::Size visible_size = original_frame->visible_rect().size();
 
-  int planes[] = {media::VideoFrame::kYPlane, media::VideoFrame::kUPlane,
-                  media::VideoFrame::kVPlane};
+  int planes[] = {VideoFrame::kYPlane, VideoFrame::kUPlane,
+                  VideoFrame::kVPlane};
   double difference = 0;
   for (int plane : planes) {
     uint8_t* original_plane = original_frame->data(plane);
     uint8_t* output_plane = output_frame->data(plane);
 
-    size_t rows =
-        media::VideoFrame::Rows(plane, kInputFormat, visible_size.height());
+    size_t rows = VideoFrame::Rows(plane, kInputFormat, visible_size.height());
     size_t columns =
-        media::VideoFrame::Columns(plane, kInputFormat, visible_size.width());
+        VideoFrame::Columns(plane, kInputFormat, visible_size.width());
     size_t stride = original_frame->stride(plane);
 
     for (size_t i = 0; i < rows; i++)
@@ -751,7 +744,7 @@
                                output_plane[stride * i + j]);
   }
   // Divide the difference by the size of the frame.
-  difference /= media::VideoFrame::AllocationSize(kInputFormat, visible_size);
+  difference /= VideoFrame::AllocationSize(kInputFormat, visible_size);
   EXPECT_TRUE(difference <= kDecodeSimilarityThreshold)
       << "differrence = " << difference << "  > decode similarity threshold";
 }
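 // The metric above is the mean absolute difference over all visible Y/U/V
 // samples. E.g. two 320x240 I420 frames that differ by 2 at every sample
 // give difference = 2 * 115200 / 115200 = 2, since the I420 allocation size
 // of a 320x240 frame is 320 * 240 * 3 / 2 = 115200 bytes, comfortably under
 // kDecodeSimilarityThreshold (10).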
@@ -787,10 +780,10 @@
   // Return the number of encoded frames per second.
   double frames_per_second();
 
-  std::unique_ptr<media::VideoEncodeAccelerator> CreateFakeVEA();
-  std::unique_ptr<media::VideoEncodeAccelerator> CreateV4L2VEA();
-  std::unique_ptr<media::VideoEncodeAccelerator> CreateVaapiVEA();
-  std::unique_ptr<media::VideoEncodeAccelerator> CreateVTVEA();
+  std::unique_ptr<VideoEncodeAccelerator> CreateFakeVEA();
+  std::unique_ptr<VideoEncodeAccelerator> CreateV4L2VEA();
+  std::unique_ptr<VideoEncodeAccelerator> CreateVaapiVEA();
+  std::unique_ptr<VideoEncodeAccelerator> CreateVTVEA();
 
   void SetState(ClientState new_state);
 
@@ -831,13 +824,13 @@
 
   // Create and return a VideoFrame wrapping the data at |position| bytes in the
   // input stream.
-  scoped_refptr<media::VideoFrame> CreateFrame(off_t position);
+  scoped_refptr<VideoFrame> CreateFrame(off_t position);
 
   // Prepare and return a frame wrapping the data at |position| bytes in the
   // input stream, ready to be sent to encoder.
   // The input frame id is returned in |input_id|.
-  scoped_refptr<media::VideoFrame> PrepareInputFrame(off_t position,
-                                                     int32_t* input_id);
+  scoped_refptr<VideoFrame> PrepareInputFrame(off_t position,
+                                              int32_t* input_id);
 
   // Update the parameters according to |mid_stream_bitrate_switch| and
   // |mid_stream_framerate_switch|.
@@ -1025,18 +1018,18 @@
   LOG_ASSERT(!has_encoder());
 }
 
-std::unique_ptr<media::VideoEncodeAccelerator> VEAClient::CreateFakeVEA() {
-  std::unique_ptr<media::VideoEncodeAccelerator> encoder;
+std::unique_ptr<VideoEncodeAccelerator> VEAClient::CreateFakeVEA() {
+  std::unique_ptr<VideoEncodeAccelerator> encoder;
   if (g_fake_encoder) {
-    encoder.reset(new media::FakeVideoEncodeAccelerator(
+    encoder.reset(new FakeVideoEncodeAccelerator(
         scoped_refptr<base::SingleThreadTaskRunner>(
             base::ThreadTaskRunnerHandle::Get())));
   }
   return encoder;
 }
 
-std::unique_ptr<media::VideoEncodeAccelerator> VEAClient::CreateV4L2VEA() {
-  std::unique_ptr<media::VideoEncodeAccelerator> encoder;
+std::unique_ptr<VideoEncodeAccelerator> VEAClient::CreateV4L2VEA() {
+  std::unique_ptr<VideoEncodeAccelerator> encoder;
 #if defined(OS_CHROMEOS) && (defined(ARCH_CPU_ARMEL) || \
                              (defined(USE_OZONE) && defined(USE_V4L2_CODEC)))
   scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kEncoder);
@@ -1046,16 +1039,16 @@
   return encoder;
 }
 
-std::unique_ptr<media::VideoEncodeAccelerator> VEAClient::CreateVaapiVEA() {
-  std::unique_ptr<media::VideoEncodeAccelerator> encoder;
+std::unique_ptr<VideoEncodeAccelerator> VEAClient::CreateVaapiVEA() {
+  std::unique_ptr<VideoEncodeAccelerator> encoder;
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
   encoder.reset(new VaapiVideoEncodeAccelerator());
 #endif
   return encoder;
 }
 
-std::unique_ptr<media::VideoEncodeAccelerator> VEAClient::CreateVTVEA() {
-  std::unique_ptr<media::VideoEncodeAccelerator> encoder;
+std::unique_ptr<VideoEncodeAccelerator> VEAClient::CreateVTVEA() {
+  std::unique_ptr<VideoEncodeAccelerator> encoder;
 #if defined(OS_MACOSX)
   encoder.reset(new VTVideoEncodeAccelerator());
 #endif
@@ -1066,7 +1059,7 @@
   DCHECK(thread_checker_.CalledOnValidThread());
   LOG_ASSERT(!has_encoder());
 
-  std::unique_ptr<media::VideoEncodeAccelerator> encoders[] = {
+  std::unique_ptr<VideoEncodeAccelerator> encoders[] = {
       CreateFakeVEA(), CreateV4L2VEA(), CreateVaapiVEA(), CreateVTVEA()};
 
   DVLOG(1) << "Profile: " << test_stream_->requested_profile
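Each Create*VEA() above compiles to a factory that returns null unless the build targets its platform, so the array is a priority-ordered candidate list. A sketch of the selection pattern the surrounding code relies on (simplified, hypothetical types):

    #include <memory>
    #include <utility>
    #include <vector>

    struct EncoderIface {
      virtual ~EncoderIface() {}
      virtual bool Initialize() = 0;
    };

    // Return the first candidate that exists and initializes; platform
    // factories that were compiled out contribute a null entry.
    std::unique_ptr<EncoderIface> PickEncoder(
        std::vector<std::unique_ptr<EncoderIface>> candidates) {
      for (auto& candidate : candidates) {
        if (candidate && candidate->Initialize())
          return std::move(candidate);
      }
      return nullptr;
    }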
@@ -1244,7 +1237,7 @@
     }
 
     if (quality_validator_) {
-      scoped_refptr<media::DecoderBuffer> buffer(media::DecoderBuffer::CopyFrom(
+      scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CopyFrom(
           reinterpret_cast<const uint8_t*>(shm->memory()),
           static_cast<int>(payload_size)));
       quality_validator_->AddDecodeBuffer(buffer);
@@ -1301,7 +1294,7 @@
     FeedEncoderWithOneInput();
 }
 
-scoped_refptr<media::VideoFrame> VEAClient::CreateFrame(off_t position) {
+scoped_refptr<VideoFrame> VEAClient::CreateFrame(off_t position) {
   uint8_t* frame_data_y =
       reinterpret_cast<uint8_t*>(&test_stream_->aligned_in_file_data[0]) +
       position;
@@ -1309,31 +1302,28 @@
   uint8_t* frame_data_v = frame_data_u + test_stream_->aligned_plane_size[1];
   CHECK_GT(current_framerate_, 0U);
 
-  scoped_refptr<media::VideoFrame> video_frame =
-      media::VideoFrame::WrapExternalYuvData(
-          kInputFormat, input_coded_size_,
-          gfx::Rect(test_stream_->visible_size), test_stream_->visible_size,
-          input_coded_size_.width(), input_coded_size_.width() / 2,
-          input_coded_size_.width() / 2, frame_data_y, frame_data_u,
-          frame_data_v,
-          base::TimeDelta().FromMilliseconds(
-              next_input_id_ * base::Time::kMillisecondsPerSecond /
-              current_framerate_));
+  scoped_refptr<VideoFrame> video_frame = VideoFrame::WrapExternalYuvData(
+      kInputFormat, input_coded_size_, gfx::Rect(test_stream_->visible_size),
+      test_stream_->visible_size, input_coded_size_.width(),
+      input_coded_size_.width() / 2, input_coded_size_.width() / 2,
+      frame_data_y, frame_data_u, frame_data_v,
+      base::TimeDelta().FromMilliseconds(next_input_id_ *
+                                         base::Time::kMillisecondsPerSecond /
+                                         current_framerate_));
   EXPECT_NE(nullptr, video_frame.get());
   return video_frame;
 }
 
-scoped_refptr<media::VideoFrame> VEAClient::PrepareInputFrame(
-    off_t position,
-    int32_t* input_id) {
+scoped_refptr<VideoFrame> VEAClient::PrepareInputFrame(off_t position,
+                                                       int32_t* input_id) {
   CHECK_LE(position + test_stream_->aligned_buffer_size,
            test_stream_->aligned_in_file_data.size());
 
-  scoped_refptr<media::VideoFrame> frame = CreateFrame(position);
+  scoped_refptr<VideoFrame> frame = CreateFrame(position);
   EXPECT_TRUE(frame);
-  frame->AddDestructionObserver(media::BindToCurrentLoop(
-      base::Bind(&VEAClient::InputNoLongerNeededCallback,
-                 base::Unretained(this), next_input_id_)));
+  frame->AddDestructionObserver(
+      BindToCurrentLoop(base::Bind(&VEAClient::InputNoLongerNeededCallback,
+                                   base::Unretained(this), next_input_id_)));
 
   LOG_ASSERT(inputs_at_client_.insert(next_input_id_).second);
 
@@ -1370,7 +1360,7 @@
     quality_validator_->AddOriginalFrame(CreateFrame(pos_in_input_stream_));
 
   int32_t input_id;
-  scoped_refptr<media::VideoFrame> video_frame =
+  scoped_refptr<VideoFrame> video_frame =
       PrepareInputFrame(pos_in_input_stream_, &input_id);
   pos_in_input_stream_ += test_stream_->aligned_buffer_size;
 
@@ -1401,8 +1391,8 @@
   base::SharedMemoryHandle dup_handle;
   LOG_ASSERT(shm->ShareToProcess(base::GetCurrentProcessHandle(), &dup_handle));
 
-  media::BitstreamBuffer bitstream_buffer(next_output_buffer_id_++, dup_handle,
-                                          output_buffer_size_);
+  BitstreamBuffer bitstream_buffer(next_output_buffer_id_++, dup_handle,
+                                   output_buffer_size_);
   LOG_ASSERT(output_buffers_at_client_
                  .insert(std::make_pair(bitstream_buffer.id(), shm))
                  .second);
@@ -1521,10 +1511,9 @@
 }
 
 void VEAClient::WriteIvfFileHeader() {
-  media::IvfFileHeader header = {};
+  IvfFileHeader header = {};
 
-  memcpy(header.signature, media::kIvfHeaderSignature,
-         sizeof(header.signature));
+  memcpy(header.signature, kIvfHeaderSignature, sizeof(header.signature));
   header.version = 0;
   header.header_size = sizeof(header);
   header.fourcc = 0x30385056;  // VP80
@@ -1543,7 +1532,7 @@
 }
 
 void VEAClient::WriteIvfFrameHeader(int frame_index, size_t frame_size) {
-  media::IvfFrameHeader header = {};
+  IvfFrameHeader header = {};
 
   header.frame_size = frame_size;
   header.timestamp = frame_index;
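For reference, the fourcc 0x30385056 written above spells "VP80" when read as little-endian bytes, and the standard IVF signature is "DKIF". A sketch of the on-disk IVF layout these headers fill in, assuming media's structs mirror the standard 32-byte file header and 12-byte frame header:

    #include <cstdint>

    #pragma pack(push, 1)
    struct IvfFileHeaderSketch {
      char signature[4];        // "DKIF"
      uint16_t version;         // 0
      uint16_t header_size;     // 32
      uint32_t fourcc;          // 0x30385056 -> 'V' 'P' '8' '0'
      uint16_t width;
      uint16_t height;
      uint32_t timebase_denum;  // Framerate.
      uint32_t timebase_num;
      uint32_t num_frames;
      uint32_t unused;
    };
    struct IvfFrameHeaderSketch {
      uint32_t frame_size;      // Payload bytes, excluding this header.
      uint64_t timestamp;       // Timebase units; the test uses frame_index.
    };
    #pragma pack(pop)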
diff --git a/media/gpu/vp8_decoder.cc b/media/gpu/vp8_decoder.cc
index 9f315c4..9d582ed 100644
--- a/media/gpu/vp8_decoder.cc
+++ b/media/gpu/vp8_decoder.cc
@@ -55,7 +55,7 @@
     return kRanOutOfStreamData;
 
   if (!curr_frame_hdr_) {
-    curr_frame_hdr_.reset(new media::Vp8FrameHeader());
+    curr_frame_hdr_.reset(new Vp8FrameHeader());
     if (!parser_.ParseFrame(curr_frame_start_, frame_size_,
                             curr_frame_hdr_.get())) {
       DVLOG(1) << "Error during decode";
@@ -116,12 +116,12 @@
     golden_frame_ = curr_pic_;
   } else {
     switch (curr_frame_hdr_->copy_buffer_to_golden) {
-      case media::Vp8FrameHeader::COPY_LAST_TO_GOLDEN:
+      case Vp8FrameHeader::COPY_LAST_TO_GOLDEN:
         DCHECK(last_frame_);
         golden_frame_ = last_frame_;
         break;
 
-      case media::Vp8FrameHeader::COPY_ALT_TO_GOLDEN:
+      case Vp8FrameHeader::COPY_ALT_TO_GOLDEN:
         DCHECK(alt_frame_);
         golden_frame_ = alt_frame_;
         break;
@@ -132,12 +132,12 @@
     alt_frame_ = curr_pic_;
   } else {
     switch (curr_frame_hdr_->copy_buffer_to_alternate) {
-      case media::Vp8FrameHeader::COPY_LAST_TO_ALT:
+      case Vp8FrameHeader::COPY_LAST_TO_ALT:
         DCHECK(last_frame_);
         alt_frame_ = last_frame_;
         break;
 
-      case media::Vp8FrameHeader::COPY_GOLDEN_TO_ALT:
+      case Vp8FrameHeader::COPY_GOLDEN_TO_ALT:
         DCHECK(curr_golden);
         alt_frame_ = curr_golden;
         break;
@@ -187,7 +187,7 @@
 
 size_t VP8Decoder::GetRequiredNumOfPictures() const {
   const size_t kVP8NumFramesActive = 4;
-  const size_t kPicsInPipeline = media::limits::kMaxVideoFrames + 2;
+  const size_t kPicsInPipeline = limits::kMaxVideoFrames + 2;
   return kVP8NumFramesActive + kPicsInPipeline;
 }
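The golden/alt bookkeeping above implements VP8's reference update rules: a refresh flag replaces the slot with the current picture, otherwise a two-bit copy field may source it from another slot. A condensed sketch (simplified handle type; copy-mode values per the VP8 spec, RFC 6386):

    // Copy-mode values from the VP8 frame header (RFC 6386).
    enum CopyToGolden { kNoCopy = 0, kCopyLastToGolden = 1, kCopyAltToGolden = 2 };

    template <typename Ref>
    void UpdateGoldenSlot(bool refresh_golden, int copy_mode, const Ref& curr,
                          const Ref& last, const Ref& alt, Ref* golden) {
      if (refresh_golden)
        *golden = curr;                       // Keyframes always refresh.
      else if (copy_mode == kCopyLastToGolden)
        *golden = last;
      else if (copy_mode == kCopyAltToGolden)
        *golden = alt;
      // Otherwise the golden slot is left untouched.
    }

The alternate-frame slot follows the same shape with COPY_LAST_TO_ALT / COPY_GOLDEN_TO_ALT. Assuming limits::kMaxVideoFrames is 4 (its value in Chromium at the time), GetRequiredNumOfPictures() works out to 4 + (4 + 2) = 10 pictures.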
 
diff --git a/media/gpu/vp8_decoder.h b/media/gpu/vp8_decoder.h
index 32ca48b..549271fb 100644
--- a/media/gpu/vp8_decoder.h
+++ b/media/gpu/vp8_decoder.h
@@ -46,7 +46,7 @@
     // Note that this runs the decode in hardware.
     // Return true if successful.
     virtual bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
-                              const media::Vp8FrameHeader* frame_hdr,
+                              const Vp8FrameHeader* frame_hdr,
                               const scoped_refptr<VP8Picture>& last_frame,
                               const scoped_refptr<VP8Picture>& golden_frame,
                               const scoped_refptr<VP8Picture>& alt_frame) = 0;
@@ -66,7 +66,7 @@
   VP8Decoder(VP8Accelerator* accelerator);
   ~VP8Decoder() override;
 
-  // media::AcceleratedVideoDecoder implementation.
+  // AcceleratedVideoDecoder implementation.
   bool Flush() override WARN_UNUSED_RESULT;
   void Reset() override;
   void SetStream(const uint8_t* ptr, size_t size) override;
@@ -87,9 +87,9 @@
 
   State state_;
 
-  media::Vp8Parser parser_;
+  Vp8Parser parser_;
 
-  std::unique_ptr<media::Vp8FrameHeader> curr_frame_hdr_;
+  std::unique_ptr<Vp8FrameHeader> curr_frame_hdr_;
   scoped_refptr<VP8Picture> curr_pic_;
   scoped_refptr<VP8Picture> last_frame_;
   scoped_refptr<VP8Picture> golden_frame_;
diff --git a/media/gpu/vp9_decoder.cc b/media/gpu/vp9_decoder.cc
index 58b42ca..ec10b76 100644
--- a/media/gpu/vp9_decoder.cc
+++ b/media/gpu/vp9_decoder.cc
@@ -19,7 +19,7 @@
 VP9Decoder::VP9Decoder(VP9Accelerator* accelerator)
     : state_(kNeedStreamMetadata), accelerator_(accelerator) {
   DCHECK(accelerator_);
-  ref_frames_.resize(media::kVp9NumRefFrames);
+  ref_frames_.resize(kVp9NumRefFrames);
 }
 
 VP9Decoder::~VP9Decoder() {}
@@ -53,17 +53,17 @@
   while (1) {
     // Read a new frame header if one is not awaiting decoding already.
     if (!curr_frame_hdr_) {
-      std::unique_ptr<media::Vp9FrameHeader> hdr(new media::Vp9FrameHeader());
-      media::Vp9Parser::Result res = parser_.ParseNextFrame(hdr.get());
+      std::unique_ptr<Vp9FrameHeader> hdr(new Vp9FrameHeader());
+      Vp9Parser::Result res = parser_.ParseNextFrame(hdr.get());
       switch (res) {
-        case media::Vp9Parser::kOk:
+        case Vp9Parser::kOk:
           curr_frame_hdr_.reset(hdr.release());
           break;
 
-        case media::Vp9Parser::kEOStream:
+        case Vp9Parser::kEOStream:
           return kRanOutOfStreamData;
 
-        case media::Vp9Parser::kInvalidStream:
+        case Vp9Parser::kInvalidStream:
           DVLOG(1) << "Error parsing stream";
           SetError();
           return kDecodeError;
@@ -141,7 +141,7 @@
 }
 
 void VP9Decoder::RefreshReferenceFrames(const scoped_refptr<VP9Picture>& pic) {
-  for (size_t i = 0; i < media::kVp9NumRefFrames; ++i) {
+  for (size_t i = 0; i < kVp9NumRefFrames; ++i) {
     DCHECK(!pic->frame_hdr->IsKeyframe() || pic->frame_hdr->RefreshFlag(i));
     if (pic->frame_hdr->RefreshFlag(i))
       ref_frames_[i] = pic;
@@ -177,7 +177,7 @@
 size_t VP9Decoder::GetRequiredNumOfPictures() const {
  // kMaxVideoFrames to keep the higher-level media pipeline populated, +2 for
  // the pictures currently being parsed and decoded.
-  return media::limits::kMaxVideoFrames + media::kVp9NumRefFrames + 2;
+  return limits::kMaxVideoFrames + kVp9NumRefFrames + 2;
 }
 
 }  // namespace media
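RefreshReferenceFrames() above is VP9's slot-refresh rule: the bitstream keeps kVp9NumRefFrames (8) reference slots, and each frame's refresh mask selects which slots it overwrites (keyframes set every bit, hence the DCHECK). A standalone sketch of the same loop (hypothetical names; assumes RefreshFlag(i) tests bit i of the mask):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kNumSlots = 8;  // kVp9NumRefFrames.

    template <typename Ref>
    void RefreshSlots(uint8_t refresh_frame_flags, const Ref& pic,
                      std::array<Ref, kNumSlots>* slots) {
      for (size_t i = 0; i < kNumSlots; ++i) {
        if (refresh_frame_flags & (1u << i))  // RefreshFlag(i) above.
          (*slots)[i] = pic;
      }
    }

With the same assumption that limits::kMaxVideoFrames is 4, VP9's GetRequiredNumOfPictures() comes to 4 + 8 + 2 = 14.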
diff --git a/media/gpu/vp9_decoder.h b/media/gpu/vp9_decoder.h
index 20e869e..b53f0a8 100644
--- a/media/gpu/vp9_decoder.h
+++ b/media/gpu/vp9_decoder.h
@@ -59,8 +59,8 @@
     // Return true when successful, false otherwise.
     virtual bool SubmitDecode(
         const scoped_refptr<VP9Picture>& pic,
-        const media::Vp9Segmentation& seg,
-        const media::Vp9LoopFilter& lf,
+        const Vp9Segmentation& seg,
+        const Vp9LoopFilter& lf,
         const std::vector<scoped_refptr<VP9Picture>>& ref_pictures) = 0;
 
     // Schedule output (display) of |pic|.
@@ -82,7 +82,7 @@
   VP9Decoder(VP9Accelerator* accelerator);
   ~VP9Decoder() override;
 
-  // media::AcceleratedVideoDecoder implementation.
+  // AcceleratedVideoDecoder implementation.
   void SetStream(const uint8_t* ptr, size_t size) override;
   bool Flush() override WARN_UNUSED_RESULT;
   void Reset() override;
@@ -113,7 +113,7 @@
   State state_;
 
   // Current frame header to be used in decoding the next picture.
-  std::unique_ptr<media::Vp9FrameHeader> curr_frame_hdr_;
+  std::unique_ptr<Vp9FrameHeader> curr_frame_hdr_;
 
   // Reference frames currently in use.
   std::vector<scoped_refptr<VP9Picture>> ref_frames_;
@@ -121,7 +121,7 @@
   // Current coded resolution.
   gfx::Size pic_size_;
 
-  media::Vp9Parser parser_;
+  Vp9Parser parser_;
 
   // VP9Accelerator instance owned by the client.
   VP9Accelerator* accelerator_;
diff --git a/media/gpu/vp9_picture.h b/media/gpu/vp9_picture.h
index a94ff0ea..7c026fa 100644
--- a/media/gpu/vp9_picture.h
+++ b/media/gpu/vp9_picture.h
@@ -23,7 +23,7 @@
   virtual V4L2VP9Picture* AsV4L2VP9Picture();
   virtual VaapiVP9Picture* AsVaapiVP9Picture();
 
-  std::unique_ptr<media::Vp9FrameHeader> frame_hdr;
+  std::unique_ptr<Vp9FrameHeader> frame_hdr;
 
  protected:
   friend class base::RefCounted<VP9Picture>;
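All of these edits are behavior-preserving for the same reason: every one of these files already sits inside namespace media, where the media:: qualifier and the unqualified name resolve to the same declaration. In miniature:

    namespace media {
    struct VideoFrameExample {};  // Hypothetical stand-in.

    void UseIt() {
      media::VideoFrameExample a;  // What the old code wrote.
      VideoFrameExample b;         // Same type; the qualifier was redundant.
    }
    }  // namespace media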
diff --git a/media/gpu/vt_video_decode_accelerator_mac.cc b/media/gpu/vt_video_decode_accelerator_mac.cc
index 1b0845b2..285ae29 100644
--- a/media/gpu/vt_video_decode_accelerator_mac.cc
+++ b/media/gpu/vt_video_decode_accelerator_mac.cc
@@ -43,16 +43,16 @@
 namespace media {
 
 // Only H.264 with 4:2:0 chroma sampling is supported.
-static const media::VideoCodecProfile kSupportedProfiles[] = {
-    media::H264PROFILE_BASELINE, media::H264PROFILE_MAIN,
-    media::H264PROFILE_EXTENDED, media::H264PROFILE_HIGH,
+static const VideoCodecProfile kSupportedProfiles[] = {
+    H264PROFILE_BASELINE, H264PROFILE_MAIN, H264PROFILE_EXTENDED,
+    H264PROFILE_HIGH,
     // TODO(hubbe): Try to re-enable this again somehow. Currently it seems
     // that some codecs fail to check the profile during initialization and
     // then fail on the first frame decode, which currently results in a
     // pipeline failure.
-    // media::H264PROFILE_HIGH10PROFILE,
-    media::H264PROFILE_SCALABLEBASELINE, media::H264PROFILE_SCALABLEHIGH,
-    media::H264PROFILE_STEREOHIGH, media::H264PROFILE_MULTIVIEWHIGH,
+    // H264PROFILE_HIGH10PROFILE,
+    H264PROFILE_SCALABLEBASELINE, H264PROFILE_SCALABLEHIGH,
+    H264PROFILE_STEREOHIGH, H264PROFILE_MULTIVIEWHIGH,
 };
 
 // Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
@@ -62,7 +62,7 @@
 // that we can bind decoded frames to. We need enough to satisfy preroll, and
 // enough to avoid unnecessary stalling, but no more than that. The resource
 // requirements are low, as we don't need the textures to be backed by storage.
-static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1;
+static const int kNumPictureBuffers = limits::kMaxVideoFrames + 1;
 
 // Maximum number of frames to queue for reordering before we stop asking for
 // more. (NotifyEndOfBitstreamBuffer() is called when frames are moved into the
@@ -461,9 +461,8 @@
   return true;
 }
 
-void VTVideoDecodeAccelerator::DecodeTask(
-    const media::BitstreamBuffer& bitstream,
-    Frame* frame) {
+void VTVideoDecodeAccelerator::DecodeTask(const BitstreamBuffer& bitstream,
+                                          Frame* frame) {
   DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
 
   // Map the bitstream buffer.
@@ -486,32 +485,32 @@
   std::vector<uint8_t> pps;
   bool has_slice = false;
   size_t data_size = 0;
-  std::vector<media::H264NALU> nalus;
+  std::vector<H264NALU> nalus;
   parser_.SetStream(buf, memory.size());
-  media::H264NALU nalu;
+  H264NALU nalu;
   while (true) {
-    media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu);
-    if (result == media::H264Parser::kEOStream)
+    H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu);
+    if (result == H264Parser::kEOStream)
       break;
-    if (result == media::H264Parser::kUnsupportedStream) {
+    if (result == H264Parser::kUnsupportedStream) {
       DLOG(ERROR) << "Unsupported H.264 stream";
       NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
       return;
     }
-    if (result != media::H264Parser::kOk) {
+    if (result != H264Parser::kOk) {
       DLOG(ERROR) << "Failed to parse H.264 stream";
       NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
       return;
     }
     switch (nalu.nal_unit_type) {
-      case media::H264NALU::kSPS:
+      case H264NALU::kSPS:
         result = parser_.ParseSPS(&last_sps_id_);
-        if (result == media::H264Parser::kUnsupportedStream) {
+        if (result == H264Parser::kUnsupportedStream) {
           DLOG(ERROR) << "Unsupported SPS";
           NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
           return;
         }
-        if (result != media::H264Parser::kOk) {
+        if (result != H264Parser::kOk) {
           DLOG(ERROR) << "Could not parse SPS";
           NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
           return;
@@ -520,19 +519,19 @@
         spsext.clear();
         break;
 
-      case media::H264NALU::kSPSExt:
+      case H264NALU::kSPSExt:
         // TODO(sandersd): Check that the previous NALU was an SPS.
         spsext.assign(nalu.data, nalu.data + nalu.size);
         break;
 
-      case media::H264NALU::kPPS:
+      case H264NALU::kPPS:
         result = parser_.ParsePPS(&last_pps_id_);
-        if (result == media::H264Parser::kUnsupportedStream) {
+        if (result == H264Parser::kUnsupportedStream) {
           DLOG(ERROR) << "Unsupported PPS";
           NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
           return;
         }
-        if (result != media::H264Parser::kOk) {
+        if (result != H264Parser::kOk) {
           DLOG(ERROR) << "Could not parse PPS";
           NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
           return;
@@ -540,16 +539,16 @@
         pps.assign(nalu.data, nalu.data + nalu.size);
         break;
 
-      case media::H264NALU::kSliceDataA:
-      case media::H264NALU::kSliceDataB:
-      case media::H264NALU::kSliceDataC:
-      case media::H264NALU::kNonIDRSlice:
-      case media::H264NALU::kIDRSlice:
+      case H264NALU::kSliceDataA:
+      case H264NALU::kSliceDataB:
+      case H264NALU::kSliceDataC:
+      case H264NALU::kNonIDRSlice:
+      case H264NALU::kIDRSlice:
         // Compute the |pic_order_cnt| for the picture from the first slice.
         if (!has_slice) {
           // Verify that we are not trying to decode a slice without an IDR.
           if (waiting_for_idr_) {
-            if (nalu.nal_unit_type == media::H264NALU::kIDRSlice) {
+            if (nalu.nal_unit_type == H264NALU::kIDRSlice) {
               waiting_for_idr_ = false;
             } else {
               // We can't compute anything yet, bail on this frame.
@@ -558,14 +557,14 @@
             }
           }
 
-          media::H264SliceHeader slice_hdr;
+          H264SliceHeader slice_hdr;
           result = parser_.ParseSliceHeader(nalu, &slice_hdr);
-          if (result == media::H264Parser::kUnsupportedStream) {
+          if (result == H264Parser::kUnsupportedStream) {
             DLOG(ERROR) << "Unsupported slice header";
             NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
             return;
           }
-          if (result != media::H264Parser::kOk) {
+          if (result != H264Parser::kOk) {
             DLOG(ERROR) << "Could not parse slice header";
             NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
             return;
@@ -574,8 +573,7 @@
           // TODO(sandersd): Maintain a cache of configurations and reconfigure
           // when a slice references a new config.
           DCHECK_EQ(slice_hdr.pic_parameter_set_id, last_pps_id_);
-          const media::H264PPS* pps =
-              parser_.GetPPS(slice_hdr.pic_parameter_set_id);
+          const H264PPS* pps = parser_.GetPPS(slice_hdr.pic_parameter_set_id);
           if (!pps) {
             DLOG(ERROR) << "Mising PPS referenced by slice";
             NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
@@ -583,7 +581,7 @@
           }
 
           DCHECK_EQ(pps->seq_parameter_set_id, last_sps_id_);
-          const media::H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id);
+          const H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id);
           if (!sps) {
             DLOG(ERROR) << "Mising SPS referenced by PPS";
             NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
@@ -596,7 +594,7 @@
             return;
           }
 
-          if (nalu.nal_unit_type == media::H264NALU::kIDRSlice)
+          if (nalu.nal_unit_type == H264NALU::kIDRSlice)
             frame->is_idr = true;
 
           if (sps->vui_parameters_present_flag &&
@@ -709,7 +707,7 @@
   // Copy NALU data into the CMBlockBuffer, inserting length headers.
   size_t offset = 0;
   for (size_t i = 0; i < nalus.size(); i++) {
-    media::H264NALU& nalu = nalus[i];
+    H264NALU& nalu = nalus[i];
     uint32_t header = base::HostToNet32(static_cast<uint32_t>(nalu.size));
     status =
         CMBlockBufferReplaceDataBytes(&header, data, offset, kNALUHeaderLength);
@@ -824,7 +822,7 @@
   ProcessWorkQueues();
 }
 
-void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) {
+void VTVideoDecodeAccelerator::Decode(const BitstreamBuffer& bitstream) {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
   if (bitstream.id() < 0) {
     DLOG(ERROR) << "Invalid bitstream, id: " << bitstream.id();
@@ -843,10 +841,10 @@
 }
 
 void VTVideoDecodeAccelerator::AssignPictureBuffers(
-    const std::vector<media::PictureBuffer>& pictures) {
+    const std::vector<PictureBuffer>& pictures) {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
 
-  for (const media::PictureBuffer& picture : pictures) {
+  for (const PictureBuffer& picture : pictures) {
     DCHECK(!picture_info_map_.count(picture.id()));
     assigned_picture_ids_.insert(picture.id());
     available_picture_ids_.push_back(picture.id());
@@ -1063,8 +1061,8 @@
   // GpuVideoDecoder so that GpuVideoDecoder can use the correct visible size
   // when the resolution changes. We should find the correct API to get the
   // real coded size and fix it.
-  client_->PictureReady(media::Picture(picture_id, frame.bitstream_id,
-                                       gfx::Rect(frame.coded_size), true));
+  client_->PictureReady(Picture(picture_id, frame.bitstream_id,
+                                gfx::Rect(frame.coded_size), true));
   return true;
 }
 
@@ -1134,7 +1132,7 @@
 }
 
 // static
-media::VideoDecodeAccelerator::SupportedProfiles
+VideoDecodeAccelerator::SupportedProfiles
 VTVideoDecodeAccelerator::GetSupportedProfiles() {
   SupportedProfiles profiles;
   for (const auto& supported_profile : kSupportedProfiles) {
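The DecodeTask() path above parses Annex B input (start-code-delimited NALUs), then repacks each NALU into the CMBlockBuffer behind a 4-byte big-endian length header (kNALUHeaderLength), which is the AVC framing VideoToolbox expects. A minimal sketch of that repacking into a flat buffer (hypothetical helper):

    #include <cstdint>
    #include <vector>

    void AppendAvcNalu(const uint8_t* nalu, uint32_t size,
                       std::vector<uint8_t>* out) {
      // 4-byte big-endian length header (kNALUHeaderLength == 4 above).
      const uint8_t header[4] = {
          static_cast<uint8_t>(size >> 24), static_cast<uint8_t>(size >> 16),
          static_cast<uint8_t>(size >> 8), static_cast<uint8_t>(size)};
      out->insert(out->end(), header, header + 4);
      out->insert(out->end(), nalu, nalu + size);
    }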
diff --git a/media/gpu/vt_video_decode_accelerator_mac.h b/media/gpu/vt_video_decode_accelerator_mac.h
index 2404ea5..4d59148 100644
--- a/media/gpu/vt_video_decode_accelerator_mac.h
+++ b/media/gpu/vt_video_decode_accelerator_mac.h
@@ -35,7 +35,7 @@
 
 // VideoToolbox.framework implementation of the VideoDecodeAccelerator
 // interface for Mac OS X (currently limited to 10.9+).
-class VTVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
+class VTVideoDecodeAccelerator : public VideoDecodeAccelerator {
  public:
   explicit VTVideoDecodeAccelerator(
       const MakeGLContextCurrentCallback& make_context_current_cb,
@@ -45,9 +45,9 @@
 
   // VideoDecodeAccelerator implementation.
   bool Initialize(const Config& config, Client* client) override;
-  void Decode(const media::BitstreamBuffer& bitstream) override;
+  void Decode(const BitstreamBuffer& bitstream) override;
   void AssignPictureBuffers(
-      const std::vector<media::PictureBuffer>& pictures) override;
+      const std::vector<PictureBuffer>& pictures) override;
   void ReusePictureBuffer(int32_t picture_id) override;
   void Flush() override;
   void Reset() override;
@@ -62,8 +62,7 @@
               OSStatus status,
               CVImageBufferRef image_buffer);
 
-  static media::VideoDecodeAccelerator::SupportedProfiles
-  GetSupportedProfiles();
+  static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
 
  private:
   // Logged to UMA, so never reuse values. Make sure to update
@@ -152,8 +151,8 @@
 
   // Compute the |pic_order_cnt| for a frame. Returns true or calls
   // NotifyError() before returning false.
-  bool ComputePicOrderCnt(const media::H264SPS* sps,
-                          const media::H264SliceHeader& slice_hdr,
+  bool ComputePicOrderCnt(const H264SPS* sps,
+                          const H264SliceHeader& slice_hdr,
                           Frame* frame);
 
   // Set up VideoToolbox using the current SPS and PPS. Returns true or calls
@@ -165,7 +164,7 @@
   bool FinishDelayedFrames();
 
   // |frame| is owned by |pending_frames_|.
-  void DecodeTask(const media::BitstreamBuffer&, Frame* frame);
+  void DecodeTask(const BitstreamBuffer&, Frame* frame);
   void DecodeDone(Frame* frame);
 
   //
@@ -196,7 +195,7 @@
   MakeGLContextCurrentCallback make_context_current_cb_;
   BindGLImageCallback bind_image_cb_;
 
-  media::VideoDecodeAccelerator::Client* client_;
+  VideoDecodeAccelerator::Client* client_;
   State state_;
 
   // Queue of pending flush tasks. This is used to drop frames when a reset
@@ -245,7 +244,7 @@
   VTDecompressionOutputCallbackRecord callback_;
   base::ScopedCFTypeRef<CMFormatDescriptionRef> format_;
   base::ScopedCFTypeRef<VTDecompressionSessionRef> session_;
-  media::H264Parser parser_;
+  H264Parser parser_;
   gfx::Size coded_size_;
 
   int last_sps_id_;
@@ -256,7 +255,7 @@
   bool config_changed_;
   bool waiting_for_idr_;
   bool missing_idr_logged_;
-  media::H264POC poc_;
+  H264POC poc_;
 
   //
   // Shared state (set up and torn down on GPU thread).
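The reordering machinery referenced throughout this header (pic_order_cnt, the reorder queue) exists because VideoToolbox can emit decoded frames out of presentation order. Illustrative only, not this file's implementation: the general technique is a min-heap keyed by POC, popped once the smallest POC is known to be displayable:

    #include <cstdint>
    #include <queue>
    #include <vector>

    struct DecodedFrameSketch {
      int32_t pic_order_cnt;
      // ...image handle...
    };

    struct PocGreater {
      bool operator()(const DecodedFrameSketch& a,
                      const DecodedFrameSketch& b) const {
        return a.pic_order_cnt > b.pic_order_cnt;  // Min-heap by POC.
      }
    };

    using ReorderQueue = std::priority_queue<DecodedFrameSketch,
                                             std::vector<DecodedFrameSketch>,
                                             PocGreater>;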
diff --git a/media/gpu/vt_video_encode_accelerator_mac.cc b/media/gpu/vt_video_encode_accelerator_mac.cc
index a3a65ba..e38efd11 100644
--- a/media/gpu/vt_video_encode_accelerator_mac.cc
+++ b/media/gpu/vt_video_encode_accelerator_mac.cc
@@ -94,7 +94,7 @@
   DCHECK(!encoder_task_weak_factory_.HasWeakPtrs());
 }
 
-media::VideoEncodeAccelerator::SupportedProfiles
+VideoEncodeAccelerator::SupportedProfiles
 VTVideoEncodeAccelerator::GetSupportedProfiles() {
   DVLOG(3) << __FUNCTION__;
   DCHECK(thread_checker_.CalledOnValidThread());
@@ -112,7 +112,7 @@
     return profiles;
   }
   const bool rv = CreateCompressionSession(
-      media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0),
+      video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0),
       gfx::Size(kDefaultResolutionWidth, kDefaultResolutionHeight), true);
   DestroyCompressionSession();
   if (!rv) {
@@ -122,7 +122,7 @@
   }
 
   SupportedProfile profile;
-  profile.profile = media::H264PROFILE_BASELINE;
+  profile.profile = H264PROFILE_BASELINE;
   profile.max_framerate_numerator = kMaxFrameRateNumerator;
   profile.max_framerate_denominator = kMaxFrameRateDenominator;
   profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
@@ -130,26 +130,25 @@
   return profiles;
 }
 
-bool VTVideoEncodeAccelerator::Initialize(
-    media::VideoPixelFormat format,
-    const gfx::Size& input_visible_size,
-    media::VideoCodecProfile output_profile,
-    uint32_t initial_bitrate,
-    Client* client) {
+bool VTVideoEncodeAccelerator::Initialize(VideoPixelFormat format,
+                                          const gfx::Size& input_visible_size,
+                                          VideoCodecProfile output_profile,
+                                          uint32_t initial_bitrate,
+                                          Client* client) {
   DVLOG(3) << __FUNCTION__
-           << ": input_format=" << media::VideoPixelFormatToString(format)
+           << ": input_format=" << VideoPixelFormatToString(format)
            << ", input_visible_size=" << input_visible_size.ToString()
            << ", output_profile=" << output_profile
            << ", initial_bitrate=" << initial_bitrate;
   DCHECK(thread_checker_.CalledOnValidThread());
   DCHECK(client);
 
-  if (media::PIXEL_FORMAT_I420 != format) {
+  if (PIXEL_FORMAT_I420 != format) {
     DLOG(ERROR) << "Input format not supported= "
-                << media::VideoPixelFormatToString(format);
+                << VideoPixelFormatToString(format);
     return false;
   }
-  if (media::H264PROFILE_BASELINE != output_profile) {
+  if (H264PROFILE_BASELINE != output_profile) {
     DLOG(ERROR) << "Output profile not supported= " << output_profile;
     return false;
   }
@@ -190,9 +189,8 @@
   return true;
 }
 
-void VTVideoEncodeAccelerator::Encode(
-    const scoped_refptr<media::VideoFrame>& frame,
-    bool force_keyframe) {
+void VTVideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
+                                      bool force_keyframe) {
   DVLOG(3) << __FUNCTION__;
   DCHECK(thread_checker_.CalledOnValidThread());
 
@@ -202,7 +200,7 @@
 }
 
 void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
-    const media::BitstreamBuffer& buffer) {
+    const BitstreamBuffer& buffer) {
   DVLOG(3) << __FUNCTION__ << ": buffer size=" << buffer.size();
   DCHECK(thread_checker_.CalledOnValidThread());
 
@@ -261,7 +259,7 @@
 }
 
 void VTVideoEncodeAccelerator::EncodeTask(
-    const scoped_refptr<media::VideoFrame>& frame,
+    const scoped_refptr<VideoFrame>& frame,
     bool force_keyframe) {
   DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK(compression_session_);
@@ -270,15 +268,15 @@
   // TODO(emircan): See if we can eliminate a copy here by using
   // CVPixelBufferPool for the allocation of incoming VideoFrames.
   base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer =
-      media::WrapVideoFrameInCVPixelBuffer(*frame);
+      WrapVideoFrameInCVPixelBuffer(*frame);
   base::ScopedCFTypeRef<CFDictionaryRef> frame_props =
-      media::video_toolbox::DictionaryWithKeyValue(
+      video_toolbox::DictionaryWithKeyValue(
           videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
           force_keyframe ? kCFBooleanTrue : kCFBooleanFalse);
 
   base::TimeTicks ref_time;
-  if (!frame->metadata()->GetTimeTicks(
-          media::VideoFrameMetadata::REFERENCE_TIME, &ref_time)) {
+  if (!frame->metadata()->GetTimeTicks(VideoFrameMetadata::REFERENCE_TIME,
+                                       &ref_time)) {
     ref_time = base::TimeTicks::Now();
   }
   auto timestamp_cm = CoreMediaGlue::CMTimeMake(
@@ -332,7 +330,7 @@
   }
 
   if (framerate != static_cast<uint32_t>(frame_rate_)) {
-    media::video_toolbox::SessionPropertySetter session_property_setter(
+    video_toolbox::SessionPropertySetter session_property_setter(
         compression_session_, videotoolbox_glue_);
     session_property_setter.Set(
         videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
@@ -353,14 +351,14 @@
     return;
 
   encoder_set_bitrate_ = bitrate;
-  media::video_toolbox::SessionPropertySetter session_property_setter(
+  video_toolbox::SessionPropertySetter session_property_setter(
       compression_session_, videotoolbox_glue_);
   bool rv = session_property_setter.Set(
       videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
       encoder_set_bitrate_);
   rv &= session_property_setter.Set(
       videotoolbox_glue_->kVTCompressionPropertyKey_DataRateLimits(),
-      media::video_toolbox::ArrayWithIntegerAndFloat(
+      video_toolbox::ArrayWithIntegerAndFloat(
           encoder_set_bitrate_ / kBitsPerByte, 1.0f));
   DLOG_IF(ERROR, !rv)
       << "Couldn't change bitrate parameters of encode session.";
@@ -379,7 +377,7 @@
 }
 
 void VTVideoEncodeAccelerator::NotifyError(
-    media::VideoEncodeAccelerator::Error error) {
+    VideoEncodeAccelerator::Error error) {
   DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
   client_task_runner_->PostTask(
       FROM_HERE, base::Bind(&Client::NotifyError, client_, error));
@@ -462,7 +460,7 @@
       sample_attachments, CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
 
   size_t used_buffer_size = 0;
-  const bool copy_rv = media::video_toolbox::CopySampleBufferToAnnexBBuffer(
+  const bool copy_rv = video_toolbox::CopySampleBufferToAnnexBBuffer(
       encode_output->sample_buffer.get(), keyframe, buffer_ref->size,
       reinterpret_cast<char*>(buffer_ref->shm->memory()), &used_buffer_size);
   if (!copy_rv) {
@@ -489,12 +487,10 @@
       CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
   CFTypeRef attributes_values[] = {
       kCFBooleanTrue,
-      media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0)
-          .release(),
-      media::video_toolbox::ArrayWithIntegers(format, arraysize(format))
-          .release()};
+      video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0).release(),
+      video_toolbox::ArrayWithIntegers(format, arraysize(format)).release()};
   const base::ScopedCFTypeRef<CFDictionaryRef> attributes =
-      media::video_toolbox::DictionaryWithKeysAndValues(
+      video_toolbox::DictionaryWithKeysAndValues(
           attributes_keys, attributes_values, arraysize(attributes_keys));
   for (auto& v : attributes_values)
     CFRelease(v);
@@ -532,7 +528,7 @@
     encoder_values.push_back(kCFBooleanTrue);
   }
   base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec =
-      media::video_toolbox::DictionaryWithKeysAndValues(
+      video_toolbox::DictionaryWithKeysAndValues(
           encoder_keys.data(), encoder_values.data(), encoder_keys.size());
 
   // Create the compression session.
@@ -563,7 +559,7 @@
   DCHECK(thread_checker_.CalledOnValidThread());
   DCHECK(compression_session_);
 
-  media::video_toolbox::SessionPropertySetter session_property_setter(
+  video_toolbox::SessionPropertySetter session_property_setter(
       compression_session_, videotoolbox_glue_);
   bool rv = true;
   rv &= session_property_setter.Set(
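One detail worth calling out in the bitrate path above: kVTCompressionPropertyKey_DataRateLimits takes (bytes, seconds) pairs, so a cap of N bits per second over a one-second window is expressed as {N / 8, 1.0f}, which is what ArrayWithIntegerAndFloat(encoder_set_bitrate_ / kBitsPerByte, 1.0f) builds. As plain arithmetic (hypothetical helper):

    #include <cstdint>
    #include <utility>

    // Bytes-per-window / window-seconds pair for a given bitrate cap.
    std::pair<int32_t, float> DataRateLimitFor(uint32_t bitrate_bps) {
      const uint32_t kBitsPerByte = 8;
      return {static_cast<int32_t>(bitrate_bps / kBitsPerByte), 1.0f};
    }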
diff --git a/media/gpu/vt_video_encode_accelerator_mac.h b/media/gpu/vt_video_encode_accelerator_mac.h
index b36e41f..cb0cf66d 100644
--- a/media/gpu/vt_video_encode_accelerator_mac.h
+++ b/media/gpu/vt_video_encode_accelerator_mac.h
@@ -20,22 +20,21 @@
 // interface for MacOSX. VideoToolbox makes no guarantees that it is thread
 // safe, so this object is pinned to the thread on which it is constructed.
 class MEDIA_GPU_EXPORT VTVideoEncodeAccelerator
-    : public media::VideoEncodeAccelerator {
+    : public VideoEncodeAccelerator {
  public:
   VTVideoEncodeAccelerator();
   ~VTVideoEncodeAccelerator() override;
 
-  // media::VideoEncodeAccelerator implementation.
-  media::VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles()
-      override;
-  bool Initialize(media::VideoPixelFormat format,
+  // VideoEncodeAccelerator implementation.
+  VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
+  bool Initialize(VideoPixelFormat format,
                   const gfx::Size& input_visible_size,
-                  media::VideoCodecProfile output_profile,
+                  VideoCodecProfile output_profile,
                   uint32_t initial_bitrate,
                   Client* client) override;
-  void Encode(const scoped_refptr<media::VideoFrame>& frame,
+  void Encode(const scoped_refptr<VideoFrame>& frame,
               bool force_keyframe) override;
-  void UseOutputBitstreamBuffer(const media::BitstreamBuffer& buffer) override;
+  void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
   void RequestEncodingParametersChange(uint32_t bitrate,
                                        uint32_t framerate) override;
   void Destroy() override;
@@ -55,8 +54,7 @@
   struct BitstreamBufferRef;
 
   // Encoding tasks to be run on |encoder_thread_|.
-  void EncodeTask(const scoped_refptr<media::VideoFrame>& frame,
-                  bool force_keyframe);
+  void EncodeTask(const scoped_refptr<VideoFrame>& frame, bool force_keyframe);
   void UseOutputBitstreamBufferTask(
       std::unique_ptr<BitstreamBufferRef> buffer_ref);
   void RequestEncodingParametersChangeTask(uint32_t bitrate,
@@ -67,7 +65,7 @@
   void SetAdjustedBitrate(int32_t bitrate);
 
   // Helper function to notify the client of an error on |client_task_runner_|.
-  void NotifyError(media::VideoEncodeAccelerator::Error error);
+  void NotifyError(VideoEncodeAccelerator::Error error);
 
   // Compression session callback function to handle compressed frames.
   static void CompressionCallback(void* encoder_opaque,