
disk_cache: Make GetAvailableRange reasonable to use safely.

Before this change, it took an out-pointer parameter, whose destination lifetime is messy to manage in a net-style asynchronous method. (See the bug for a couple of different ways this could go wrong.)

This changes it to return the result (and pass it to callbacks) by value instead, which avoids those complications entirely.
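
For illustration, a minimal sketch of the new calling pattern (hypothetical caller code, not part of this CL; OnRange is a made-up helper):

  void OnRange(const disk_cache::RangeResult& result) {
    if (result.net_error != net::OK)
      return;  // start/available_len are only meaningful on net::OK.
    // Use result.start and result.available_len; there is no out-pointer
    // that has to stay alive across a possibly-asynchronous operation.
  }

  disk_cache::RangeResult result =
      entry->GetAvailableRange(offset, len, base::BindOnce(&OnRange));
  if (result.net_error != net::ERR_IO_PENDING)
    OnRange(result);  // Completed synchronously; the callback will not run.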


Bug: 1208738

Change-Id: I01f07ac693bb25266f91c3cd1ec4d69c023a1891
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2983259
Commit-Queue: Maksim Orlovich <morlovich@chromium.org>
Reviewed-by: Ben Kelly <wanderview@chromium.org>
Reviewed-by: Matthew Denton <mpdenton@chromium.org>
Reviewed-by: Shivani Sharma <shivanisha@chromium.org>
Cr-Commit-Position: refs/heads/master@{#898234}
diff --git a/content/browser/cache_storage/cache_storage_cache_unittest.cc b/content/browser/cache_storage/cache_storage_cache_unittest.cc
index 078d1bb..648c1307 100644
--- a/content/browser/cache_storage/cache_storage_cache_unittest.cc
+++ b/content/browser/cache_storage/cache_storage_cache_unittest.cc
@@ -245,11 +245,11 @@
                       CompletionOnceCallback callback) override {
     return entry_->WriteSparseData(offset, buf, buf_len, std::move(callback));
   }
-  int GetAvailableRange(int64_t offset,
-                        int len,
-                        int64_t* start,
-                        CompletionOnceCallback callback) override {
-    return entry_->GetAvailableRange(offset, len, start, std::move(callback));
+  disk_cache::RangeResult GetAvailableRange(
+      int64_t offset,
+      int len,
+      disk_cache::RangeResultCallback callback) override {
+    return entry_->GetAvailableRange(offset, len, std::move(callback));
   }
   bool CouldBeSparse() const override { return entry_->CouldBeSparse(); }
   void CancelSparseIO() override { entry_->CancelSparseIO(); }
diff --git a/net/disk_cache/blockfile/entry_impl.cc b/net/disk_cache/blockfile/entry_impl.cc
index 9d697a8..1dc20bb 100644
--- a/net/disk_cache/blockfile/entry_impl.cc
+++ b/net/disk_cache/blockfile/entry_impl.cc
@@ -405,12 +405,12 @@
   return result;
 }
 
-int EntryImpl::GetAvailableRangeImpl(int64_t offset, int len, int64_t* start) {
+RangeResult EntryImpl::GetAvailableRangeImpl(int64_t offset, int len) {
   int result = InitSparseData();
   if (net::OK != result)
-    return result;
+    return RangeResult(static_cast<net::Error>(result));
 
-  return sparse_->GetAvailableRange(offset, len, start);
+  return sparse_->GetAvailableRange(offset, len);
 }
 
 void EntryImpl::CancelSparseIOImpl() {
@@ -922,16 +922,14 @@
   return net::ERR_IO_PENDING;
 }
 
-int EntryImpl::GetAvailableRange(int64_t offset,
-                                 int len,
-                                 int64_t* start,
-                                 CompletionOnceCallback callback) {
+RangeResult EntryImpl::GetAvailableRange(int64_t offset,
+                                         int len,
+                                         RangeResultCallback callback) {
   if (!background_queue_.get())
-    return net::ERR_UNEXPECTED;
+    return RangeResult(net::ERR_UNEXPECTED);
 
-  background_queue_->GetAvailableRange(this, offset, len, start,
-                                       std::move(callback));
-  return net::ERR_IO_PENDING;
+  background_queue_->GetAvailableRange(this, offset, len, std::move(callback));
+  return RangeResult(net::ERR_IO_PENDING);
 }
 
 bool EntryImpl::CouldBeSparse() const {
diff --git a/net/disk_cache/blockfile/entry_impl.h b/net/disk_cache/blockfile/entry_impl.h
index 24da1f9..e1cf14d 100644
--- a/net/disk_cache/blockfile/entry_impl.h
+++ b/net/disk_cache/blockfile/entry_impl.h
@@ -71,7 +71,7 @@
                           IOBuffer* buf,
                           int buf_len,
                           CompletionOnceCallback callback);
-  int GetAvailableRangeImpl(int64_t offset, int len, int64_t* start);
+  RangeResult GetAvailableRangeImpl(int64_t offset, int len);
   void CancelSparseIOImpl();
   int ReadyForSparseIOImpl(CompletionOnceCallback callback);
 
@@ -193,10 +193,9 @@
                       IOBuffer* buf,
                       int buf_len,
                       CompletionOnceCallback callback) override;
-  int GetAvailableRange(int64_t offset,
-                        int len,
-                        int64_t* start,
-                        CompletionOnceCallback callback) override;
+  RangeResult GetAvailableRange(int64_t offset,
+                                int len,
+                                RangeResultCallback callback) override;
   bool CouldBeSparse() const override;
   void CancelSparseIO() override;
   net::Error ReadyForSparseIO(CompletionOnceCallback callback) override;
diff --git a/net/disk_cache/blockfile/in_flight_backend_io.cc b/net/disk_cache/blockfile/in_flight_backend_io.cc
index cc9bcf4..c1dc7656 100644
--- a/net/disk_cache/blockfile/in_flight_backend_io.cc
+++ b/net/disk_cache/blockfile/in_flight_backend_io.cc
@@ -49,6 +49,13 @@
   entry_result_callback_ = std::move(callback);
 }
 
+BackendIO::BackendIO(InFlightIO* controller,
+                     BackendImpl* backend,
+                     RangeResultCallback callback)
+    : BackendIO(controller, backend) {
+  range_result_callback_ = std::move(callback);
+}
+
 BackendIO::BackendIO(InFlightIO* controller, BackendImpl* backend)
     : BackgroundIO(controller),
       backend_(backend),
@@ -61,8 +68,7 @@
       offset_(0),
       buf_len_(0),
       truncate_(false),
-      offset64_(0),
-      start_(nullptr) {
+      offset64_(0) {
   start_time_ = base::TimeTicks::Now();
 }
 
@@ -115,6 +121,10 @@
   std::move(entry_result_callback_).Run(std::move(entry_result));
 }
 
+void BackendIO::RunRangeResultCallback() {
+  std::move(range_result_callback_).Run(range_result_);
+}
+
 void BackendIO::Init() {
   operation_ = OP_INIT;
 }
@@ -236,15 +246,11 @@
   buf_len_ = buf_len;
 }
 
-void BackendIO::GetAvailableRange(EntryImpl* entry,
-                                  int64_t offset,
-                                  int len,
-                                  int64_t* start) {
+void BackendIO::GetAvailableRange(EntryImpl* entry, int64_t offset, int len) {
   operation_ = OP_GET_RANGE;
   entry_ = entry;
   offset64_ = offset;
   buf_len_ = len;
-  start_ = start;
 }
 
 void BackendIO::CancelSparseIO(EntryImpl* entry) {
@@ -384,7 +390,8 @@
           base::BindOnce(&BackendIO::OnIOComplete, this));
       break;
     case OP_GET_RANGE:
-      result_ = entry_->GetAvailableRangeImpl(offset64_, buf_len_, start_);
+      range_result_ = entry_->GetAvailableRangeImpl(offset64_, buf_len_);
+      result_ = range_result_.net_error;
       break;
     case OP_CANCEL_IO:
       entry_->CancelSparseIOImpl();
@@ -581,15 +588,13 @@
   PostOperation(FROM_HERE, operation.get());
 }
 
-void InFlightBackendIO::GetAvailableRange(
-    EntryImpl* entry,
-    int64_t offset,
-    int len,
-    int64_t* start,
-    net::CompletionOnceCallback callback) {
+void InFlightBackendIO::GetAvailableRange(EntryImpl* entry,
+                                          int64_t offset,
+                                          int len,
+                                          RangeResultCallback callback) {
   scoped_refptr<BackendIO> operation(
       new BackendIO(this, backend_, std::move(callback)));
-  operation->GetAvailableRange(entry, offset, len, start);
+  operation->GetAvailableRange(entry, offset, len);
   PostOperation(FROM_HERE, operation.get());
 }
 
@@ -620,6 +625,11 @@
   if (op->has_callback() && (!cancel || op->IsEntryOperation()))
     op->RunCallback(op->result());
 
+  if (op->has_range_result_callback()) {
+    DCHECK(op->IsEntryOperation());
+    op->RunRangeResultCallback();
+  }
+
   if (op->has_entry_result_callback() && !cancel) {
     DCHECK(!op->IsEntryOperation());
     op->RunEntryResultCallback();
diff --git a/net/disk_cache/blockfile/in_flight_backend_io.h b/net/disk_cache/blockfile/in_flight_backend_io.h
index d06436e..a84a2e3e 100644
--- a/net/disk_cache/blockfile/in_flight_backend_io.h
+++ b/net/disk_cache/blockfile/in_flight_backend_io.h
@@ -41,6 +41,10 @@
             BackendImpl* backend,
             EntryResultCallback callback);
 
+  BackendIO(InFlightIO* controller,
+            BackendImpl* backend,
+            RangeResultCallback callback);
+
   // Runs the actual operation on the background thread.
   void ExecuteOperation();
 
@@ -62,6 +66,11 @@
   }
   void RunEntryResultCallback();
 
+  bool has_range_result_callback() const {
+    return !range_result_callback_.is_null();
+  }
+  void RunRangeResultCallback();
+
   // The operations we proxy:
   void Init();
   void OpenOrCreateEntry(const std::string& key);
@@ -92,10 +101,7 @@
                        int64_t offset,
                        net::IOBuffer* buf,
                        int buf_len);
-  void GetAvailableRange(EntryImpl* entry,
-                         int64_t offset,
-                         int len,
-                         int64_t* start);
+  void GetAvailableRange(EntryImpl* entry, int64_t offset, int len);
   void CancelSparseIO(EntryImpl* entry);
   void ReadyForSparseIO(EntryImpl* entry);
 
@@ -155,6 +161,10 @@
   Entry* out_entry_;  // if set, already has the user's ref added.
   bool out_entry_opened_;
 
+  // For GetAvailableRange
+  RangeResultCallback range_result_callback_;
+  RangeResult range_result_;
+
   // The arguments of all the operations we proxy:
   std::string key_;
   base::Time initial_time_;
@@ -168,7 +178,6 @@
   int buf_len_;
   bool truncate_;
   int64_t offset64_;
-  int64_t* start_;
   base::TimeTicks start_time_;
   base::OnceClosure task_;
 
@@ -230,8 +239,7 @@
   void GetAvailableRange(EntryImpl* entry,
                          int64_t offset,
                          int len,
-                         int64_t* start,
-                         net::CompletionOnceCallback callback);
+                         RangeResultCallback callback);
   void CancelSparseIO(EntryImpl* entry);
   void ReadyForSparseIO(EntryImpl* entry, net::CompletionOnceCallback callback);
 
diff --git a/net/disk_cache/blockfile/sparse_control.cc b/net/disk_cache/blockfile/sparse_control.cc
index a914ac0da..0a1fee4 100644
--- a/net/disk_cache/blockfile/sparse_control.cc
+++ b/net/disk_cache/blockfile/sparse_control.cc
@@ -336,25 +336,23 @@
   return net::ERR_IO_PENDING;
 }
 
-int SparseControl::GetAvailableRange(int64_t offset, int len, int64_t* start) {
+RangeResult SparseControl::GetAvailableRange(int64_t offset, int len) {
   DCHECK(init_);
   // We don't support simultaneous IO for sparse data.
   if (operation_ != kNoOperation)
-    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
-  DCHECK(start);
+    return RangeResult(net::ERR_CACHE_OPERATION_NOT_SUPPORTED);
 
   range_found_ = false;
   int result = StartIO(kGetRangeOperation, offset, nullptr, len,
                        CompletionOnceCallback());
-  if (range_found_) {
-    *start = offset_;
-    return result;
-  }
+  if (range_found_)
+    return RangeResult(offset_, result);
 
-  // This is a failure. We want to return a valid start value in any case.
-  *start = offset;
-  return result < 0 ? result : 0;  // Don't mask error codes to the caller.
+  // This is a failure. We want to return a valid start value if it's just an
+  // empty range, though.
+  if (result < 0)
+    return RangeResult(static_cast<net::Error>(result));
+  return RangeResult(offset, 0);
 }
 
 void SparseControl::CancelIO() {
@@ -720,7 +718,8 @@
   // |finished_| to true.
   if (kGetRangeOperation == operation_ && entry_->net_log().IsCapturing()) {
     entry_->net_log().EndEvent(net::NetLogEventType::SPARSE_GET_RANGE, [&] {
-      return CreateNetLogGetAvailableRangeResultParams(offset_, result_);
+      return CreateNetLogGetAvailableRangeResultParams(
+          RangeResult(offset_, result_));
     });
   }
   if (finished_) {
diff --git a/net/disk_cache/blockfile/sparse_control.h b/net/disk_cache/blockfile/sparse_control.h
index dfe16b2..ebd6c91 100644
--- a/net/disk_cache/blockfile/sparse_control.h
+++ b/net/disk_cache/blockfile/sparse_control.h
@@ -15,6 +15,7 @@
 #include "net/base/completion_once_callback.h"
 #include "net/disk_cache/blockfile/bitmap.h"
 #include "net/disk_cache/blockfile/disk_format.h"
+#include "net/disk_cache/disk_cache.h"
 
 namespace net {
 class IOBuffer;
@@ -23,7 +24,6 @@
 
 namespace disk_cache {
 
-class Entry;
 class EntryImpl;
 
 // This class provides support for the sparse capabilities of the disk cache.
@@ -68,7 +68,7 @@
               CompletionOnceCallback callback);
 
   // Implements Entry::GetAvailableRange().
-  int GetAvailableRange(int64_t offset, int len, int64_t* start);
+  RangeResult GetAvailableRange(int64_t offset, int len);
 
   // Cancels the current sparse operation (if any).
   void CancelIO();
diff --git a/net/disk_cache/disk_cache.h b/net/disk_cache/disk_cache.h
index 579600e3..4500dba 100644
--- a/net/disk_cache/disk_cache.h
+++ b/net/disk_cache/disk_cache.h
@@ -47,7 +47,9 @@
 class Entry;
 class Backend;
 class EntryResult;
+struct RangeResult;
 using EntryResultCallback = base::OnceCallback<void(EntryResult)>;
+using RangeResultCallback = base::OnceCallback<void(const RangeResult&)>;
 
 // How to handle resetting the back-end cache from the previous session.
 // See CreateCacheBackend() for its usage.
@@ -295,8 +297,10 @@
 // This interface represents an entry in the disk cache.
 class NET_EXPORT Entry {
  public:
-  typedef net::CompletionOnceCallback CompletionOnceCallback;
-  typedef net::IOBuffer IOBuffer;
+  using CompletionOnceCallback = net::CompletionOnceCallback;
+  using IOBuffer = net::IOBuffer;
+  using RangeResultCallback = disk_cache::RangeResultCallback;
+  using RangeResult = disk_cache::RangeResult;
 
   // Marks this cache entry for deletion.
   virtual void Doom() = 0;
@@ -413,17 +417,11 @@
 
   // Returns information about the currently stored portion of a sparse entry.
   // |offset| and |len| describe a particular range that should be scanned to
-  // find out if it is stored or not. |start| will contain the offset of the
-  // first byte that is stored within this range, and the return value is the
-  // minimum number of consecutive stored bytes. Note that it is possible that
-  // this entry has stored more than the returned value. This method returns a
-  // net error code whenever the request cannot be completed successfully. If
-  // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
-  // operation completes, and |start| must remain valid until that point.
-  virtual int GetAvailableRange(int64_t offset,
-                                int len,
-                                int64_t* start,
-                                CompletionOnceCallback callback) = 0;
+  // find out if it is stored or not. Please see the documentation of
+  // RangeResult for more details.
+  virtual RangeResult GetAvailableRange(int64_t offset,
+                                        int len,
+                                        RangeResultCallback callback) = 0;
 
   // Returns true if this entry could be a sparse entry or false otherwise. This
   // is a quick test that may return true even if the entry is not really
@@ -522,6 +520,37 @@
   ScopedEntryPtr entry_;
 };
 
+// Represents a result of GetAvailableRange.
+struct NET_EXPORT RangeResult {
+  RangeResult() = default;
+  explicit RangeResult(net::Error error) : net_error(error) {}
+
+  RangeResult(int64_t start, int available_len)
+      : net_error(net::OK), start(start), available_len(available_len) {}
+
+  // This is net::OK if operation succeeded, and `start` and `available_len`
+  // were set appropriately (potentially with 0 for `available_len`).
+  //
+  // In return value of GetAvailableRange(), net::ERR_IO_PENDING means that the
+  // result will be provided asynchronously via the callback. This can not occur
+  // in the value passed to the callback itself.
+  //
+  // In case the operation failed, this will be the error code.
+  net::Error net_error = net::ERR_FAILED;
+
+  // First byte within the range passed to GetAvailableRange that's available
+  // in the cache entry.
+  //
+  // Valid iff net_error is net::OK.
+  int64_t start = -1;
+
+  // Number of consecutive bytes stored within the requested range starting from
+  // `start` that can be read at once. This may be zero.
+  //
+  // Valid iff net_error is net::OK.
+  int available_len = 0;
+};
+
 }  // namespace disk_cache
 
 #endif  // NET_DISK_CACHE_DISK_CACHE_H_
diff --git a/net/disk_cache/disk_cache_fuzzer.cc b/net/disk_cache/disk_cache_fuzzer.cc
index ee4c103..b2618c8 100644
--- a/net/disk_cache/disk_cache_fuzzer.cc
+++ b/net/disk_cache/disk_cache_fuzzer.cc
@@ -985,22 +985,15 @@
         uint32_t offset = gar.offset() % kMaxEntrySize;
         uint32_t len = gar.len() % kMaxEntrySize;
         bool async = gar.async();
-        auto start = base::MakeRefCounted<base::RefCountedData<int64_t>>();
-        // Raw pointer will stay alive until the end of this command for sure,
-        // as we hold a reference to the object.
-        int64_t* start_tmp = &start->data;
 
         auto result_checker = base::BindRepeating(
-            [](net::CompletionOnceCallback callback,
-               scoped_refptr<base::RefCountedData<int64_t>> start,
-               uint32_t offset, uint32_t len, int rv) {
-              std::move(callback).Run(rv);
+            [](net::CompletionOnceCallback callback, uint32_t offset,
+               uint32_t len, const disk_cache::RangeResult& result) {
+              std::move(callback).Run(result.net_error);
 
-              if (rv <= 0)
+              if (result.net_error <= 0)
                 return;
 
-              int64_t* start_tmp = &start->data;
-
               // Make sure that the result is contained in what was
               // requested. It doesn't have to be the same even if there was
               // an exact corresponding write, since representation of ranges
@@ -1010,41 +1003,45 @@
               net::Interval<uint32_t> requested(offset, offset + len);
 
               uint32_t range_start, range_end;
-              base::CheckedNumeric<uint64_t> range_start64(*start_tmp);
+              base::CheckedNumeric<uint64_t> range_start64(result.start);
               CHECK(range_start64.AssignIfValid(&range_start));
-              base::CheckedNumeric<uint64_t> range_end64 = range_start + rv;
+              base::CheckedNumeric<uint64_t> range_end64 =
+                  range_start + result.available_len;
               CHECK(range_end64.AssignIfValid(&range_end));
               net::Interval<uint32_t> gotten(range_start, range_end);
 
               CHECK(requested.Contains(gotten));
             },
-            GetIOCallback(IOType::GetAvailableRange), start, offset, len);
+            GetIOCallback(IOType::GetAvailableRange), offset, len);
 
-        net::TestCompletionCallback tcb;
-        net::CompletionOnceCallback cb =
+        TestRangeResultCompletionCallback tcb;
+        disk_cache::RangeResultCallback cb =
             !async ? tcb.callback() : result_checker;
 
         MAYBE_PRINT << "GetAvailableRange(\"" << entry->GetKey() << "\", "
                     << offset << ", " << len << ")" << std::flush;
-        int rv =
-            entry->GetAvailableRange(offset, len, start_tmp, std::move(cb));
+        disk_cache::RangeResult result =
+            entry->GetAvailableRange(offset, len, std::move(cb));
 
-        if (rv != net::ERR_IO_PENDING) {
+        if (result.net_error != net::ERR_IO_PENDING) {
           // Run the checker callback ourselves.
-          result_checker.Run(rv);
+          result_checker.Run(result);
         } else if (!async) {
           // In this case the callback will be run by the backend, so we don't
           // need to do it manually.
-          rv = tcb.GetResult(rv);
+          result = tcb.GetResult(result);
         }
 
         // Finally, take care of printing.
-        if (async && rv == net::ERR_IO_PENDING) {
+        if (async && result.net_error == net::ERR_IO_PENDING) {
           MAYBE_PRINT << " = net::ERR_IO_PENDING (async)" << std::endl;
         } else {
-          MAYBE_PRINT << " = " << rv << ", *start = " << *start_tmp;
-          if (rv < 0) {
-            MAYBE_PRINT << ", error to string: " << net::ErrorToShortString(rv)
+          MAYBE_PRINT << " = " << result.net_error
+                      << ", start = " << result.start
+                      << ", available_len = " << result.available_len;
+          if (result.net_error < 0) {
+            MAYBE_PRINT << ", error to string: "
+                        << net::ErrorToShortString(result.net_error)
                         << std::endl;
           } else {
             MAYBE_PRINT << std::endl;
diff --git a/net/disk_cache/disk_cache_test_base.cc b/net/disk_cache/disk_cache_test_base.cc
index ae5b73d..f7467ec 100644
--- a/net/disk_cache/disk_cache_test_base.cc
+++ b/net/disk_cache/disk_cache_test_base.cc
@@ -302,9 +302,15 @@
                                               int64_t offset,
                                               int len,
                                               int64_t* start) {
-  net::TestCompletionCallback cb;
-  int rv = entry->GetAvailableRange(offset, len, start, cb.callback());
-  return cb.GetResult(rv);
+  TestRangeResultCompletionCallback cb;
+  disk_cache::RangeResult result =
+      cb.GetResult(entry->GetAvailableRange(offset, len, cb.callback()));
+
+  if (result.net_error == net::OK) {
+    *start = result.start;
+    return result.available_len;
+  }
+  return result.net_error;
 }
 
 void DiskCacheTestWithCache::TrimForTest(bool empty) {
diff --git a/net/disk_cache/disk_cache_test_base.h b/net/disk_cache/disk_cache_test_base.h
index f7b5395b..0b30580 100644
--- a/net/disk_cache/disk_cache_test_base.h
+++ b/net/disk_cache/disk_cache_test_base.h
@@ -168,6 +168,7 @@
                       int64_t offset,
                       net::IOBuffer* buf,
                       int len);
+  // TODO(morlovich): Port all the tests using this to RangeResult.
   int GetAvailableRange(disk_cache::Entry* entry,
                         int64_t offset,
                         int len,
diff --git a/net/disk_cache/disk_cache_test_util.cc b/net/disk_cache/disk_cache_test_util.cc
index 959fa88b..d18d917 100644
--- a/net/disk_cache/disk_cache_test_util.cc
+++ b/net/disk_cache/disk_cache_test_util.cc
@@ -92,6 +92,22 @@
                         base::Unretained(this));
 }
 
+TestRangeResultCompletionCallback::TestRangeResultCompletionCallback() =
+    default;
+
+TestRangeResultCompletionCallback::~TestRangeResultCompletionCallback() =
+    default;
+
+disk_cache::RangeResultCallback TestRangeResultCompletionCallback::callback() {
+  return base::BindOnce(&TestRangeResultCompletionCallback::HelpSetResult,
+                        base::Unretained(this));
+}
+
+void TestRangeResultCompletionCallback::HelpSetResult(
+    const disk_cache::RangeResult& result) {
+  SetResult(result);
+}
+
 // -----------------------------------------------------------------------
 
 MessageLoopHelper::MessageLoopHelper()
diff --git a/net/disk_cache/disk_cache_test_util.h b/net/disk_cache/disk_cache_test_util.h
index 24c7573..f7e2fa7 100644
--- a/net/disk_cache/disk_cache_test_util.h
+++ b/net/disk_cache/disk_cache_test_util.h
@@ -62,6 +62,29 @@
   DISALLOW_COPY_AND_ASSIGN(TestEntryResultCompletionCallback);
 };
 
+// Like net::TestCompletionCallback, but for RangeResultCallback.
+struct RangeResultIsPendingHelper {
+  bool operator()(const disk_cache::RangeResult& result) const {
+    return result.net_error == net::ERR_IO_PENDING;
+  }
+};
+
+class TestRangeResultCompletionCallback
+    : public net::internal::TestCompletionCallbackTemplate<
+          disk_cache::RangeResult,
+          RangeResultIsPendingHelper> {
+ public:
+  TestRangeResultCompletionCallback();
+  ~TestRangeResultCompletionCallback() override;
+
+  disk_cache::RangeResultCallback callback();
+
+ private:
+  // Reference -> Value adapter --- disk_cache wants reference for callback,
+  // base class wants a value.
+  void HelpSetResult(const disk_cache::RangeResult& result);
+};
+
 // -----------------------------------------------------------------------
 
 // Simple helper to deal with the message loop on a test.
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index 3153413..369a688 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -17,6 +17,7 @@
 #include "base/test/metrics/histogram_tester.h"
 #include "base/test/scoped_feature_list.h"
 #include "base/threading/platform_thread.h"
+#include "build/build_config.h"
 #include "net/base/completion_once_callback.h"
 #include "net/base/io_buffer.h"
 #include "net/base/net_errors.h"
@@ -45,6 +46,7 @@
 using base::Time;
 using disk_cache::EntryResult;
 using disk_cache::EntryResultCallback;
+using disk_cache::RangeResult;
 using disk_cache::ScopedEntryPtr;
 
 // Tests that can run with different types of caches.
@@ -1734,57 +1736,66 @@
   EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));
 
   // We stop at the first empty block.
-  int64_t start;
-  net::TestCompletionCallback cb;
-  int rv = entry->GetAvailableRange(
-      0x20F0000, kSize * 2, &start, cb.callback());
-  EXPECT_EQ(kSize, cb.GetResult(rv));
-  EXPECT_EQ(0x20F0000, start);
+  TestRangeResultCompletionCallback cb;
+  RangeResult result = cb.GetResult(
+      entry->GetAvailableRange(0x20F0000, kSize * 2, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(kSize, result.available_len);
+  EXPECT_EQ(0x20F0000, result.start);
 
-  start = 0;
-  rv = entry->GetAvailableRange(0, kSize, &start, cb.callback());
-  EXPECT_EQ(0, cb.GetResult(rv));
-  rv = entry->GetAvailableRange(
-      0x20F0000 - kSize, kSize, &start, cb.callback());
-  EXPECT_EQ(0, cb.GetResult(rv));
-  rv = entry->GetAvailableRange(0, 0x2100000, &start, cb.callback());
-  EXPECT_EQ(kSize, cb.GetResult(rv));
-  EXPECT_EQ(0x20F0000, start);
+  result = cb.GetResult(entry->GetAvailableRange(0, kSize, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(0, result.available_len);
+
+  result = cb.GetResult(
+      entry->GetAvailableRange(0x20F0000 - kSize, kSize, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(0, result.available_len);
+
+  result = cb.GetResult(entry->GetAvailableRange(0, 0x2100000, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(kSize, result.available_len);
+  EXPECT_EQ(0x20F0000, result.start);
 
   // We should be able to Read based on the results of GetAvailableRange.
-  start = -1;
-  rv = entry->GetAvailableRange(0x2100000, kSize, &start, cb.callback());
-  EXPECT_EQ(0, cb.GetResult(rv));
-  rv = entry->ReadSparseData(start, buf.get(), kSize, cb.callback());
-  EXPECT_EQ(0, cb.GetResult(rv));
+  net::TestCompletionCallback read_cb;
+  result =
+      cb.GetResult(entry->GetAvailableRange(0x2100000, kSize, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(0, result.available_len);
+  int rv =
+      entry->ReadSparseData(result.start, buf.get(), kSize, read_cb.callback());
+  EXPECT_EQ(0, read_cb.GetResult(rv));
 
-  start = 0;
-  rv = entry->GetAvailableRange(0x20F2000, kSize, &start, cb.callback());
-  EXPECT_EQ(0x2000, cb.GetResult(rv));
-  EXPECT_EQ(0x20F2000, start);
-  EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf.get(), kSize));
+  result =
+      cb.GetResult(entry->GetAvailableRange(0x20F2000, kSize, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(0x2000, result.available_len);
+  EXPECT_EQ(0x20F2000, result.start);
+  EXPECT_EQ(0x2000, ReadSparseData(entry, result.start, buf.get(), kSize));
 
   // Make sure that we respect the |len| argument.
-  start = 0;
-  rv = entry->GetAvailableRange(
-      0x20F0001 - kSize, kSize, &start, cb.callback());
-  EXPECT_EQ(1, cb.GetResult(rv));
-  EXPECT_EQ(0x20F0000, start);
+  result = cb.GetResult(
+      entry->GetAvailableRange(0x20F0001 - kSize, kSize, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(1, result.available_len);
+  EXPECT_EQ(0x20F0000, result.start);
 
   // Use very small ranges. Write at offset 50.
   const int kTinyLen = 10;
   EXPECT_EQ(kTinyLen, WriteSparseData(entry, 50, buf.get(), kTinyLen));
 
-  start = -1;
-  rv = entry->GetAvailableRange(kTinyLen * 2, kTinyLen, &start, cb.callback());
-  EXPECT_EQ(0, cb.GetResult(rv));
-  EXPECT_EQ(kTinyLen * 2, start);
+  result = cb.GetResult(
+      entry->GetAvailableRange(kTinyLen * 2, kTinyLen, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(0, result.available_len);
+  EXPECT_EQ(kTinyLen * 2, result.start);
 
   // Get a huge range with maximum boundary
-  start = -1;
-  rv = entry->GetAvailableRange(0x2100000, std::numeric_limits<int32_t>::max(),
-                                &start, cb.callback());
-  EXPECT_EQ(0, cb.GetResult(rv));
+  result = cb.GetResult(entry->GetAvailableRange(
+      0x2100000, std::numeric_limits<int32_t>::max(), cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(0, result.available_len);
 
   entry->Close();
 }
@@ -1831,24 +1842,27 @@
   // Try to query a range starting from that block 0.
   // The cache tracks: [0, 612) [1024, 3072).
   // The request is for: [812, 2059) so response should be [1024, 2059), which
-  // has lenth = 1035. Previously this return a negative number for rv.
-  int64_t start = -1;
-  net::TestCompletionCallback cb;
-  int rv = entry->GetAvailableRange(812, 1247, &start, cb.callback());
-  EXPECT_EQ(1035, cb.GetResult(rv));
-  EXPECT_EQ(1024, start);
+  // has length = 1035. Previously this returned a negative number for rv.
+  TestRangeResultCompletionCallback cb;
+  RangeResult result =
+      cb.GetResult(entry->GetAvailableRange(812, 1247, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(1035, result.available_len);
+  EXPECT_EQ(1024, result.start);
 
   // Now query [512, 1536). This matches both [512, 612) and [1024, 1536),
   // so this should return [512, 612).
-  rv = entry->GetAvailableRange(512, 1024, &start, cb.callback());
-  EXPECT_EQ(100, cb.GetResult(rv));
-  EXPECT_EQ(512, start);
+  result = cb.GetResult(entry->GetAvailableRange(512, 1024, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(100, result.available_len);
+  EXPECT_EQ(512, result.start);
 
   // Now query next portion, [612, 1636). This now just should produce
   // [1024, 1636)
-  rv = entry->GetAvailableRange(612, 1024, &start, cb.callback());
-  EXPECT_EQ(612, cb.GetResult(rv));
-  EXPECT_EQ(1024, start);
+  result = cb.GetResult(entry->GetAvailableRange(612, 1024, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(612, result.available_len);
+  EXPECT_EQ(1024, result.start);
 
   // Do a continuous small write, this one at [3072, 3684).
   // This means the cache tracks [1024, 3072) via bitmaps and [3072, 3684)
@@ -1857,9 +1871,10 @@
                                         buf_small.get(), kSmallSize));
 
   // Query [2048, 4096). Should get [2048, 3684)
-  rv = entry->GetAvailableRange(2048, 2048, &start, cb.callback());
-  EXPECT_EQ(1636, cb.GetResult(rv));
-  EXPECT_EQ(2048, start);
+  result = cb.GetResult(entry->GetAvailableRange(2048, 2048, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(1636, result.available_len);
+  EXPECT_EQ(2048, result.start);
 
   // Now write at [4096, 4708). Since only one sub-kb thing is tracked, this
   // now tracks  [1024, 3072) via bitmaps and [4096, 4708) as the last write.
@@ -1867,22 +1882,26 @@
                                         buf_small.get(), kSmallSize));
 
   // Query [2048, 4096). Should get [2048, 3072)
-  rv = entry->GetAvailableRange(2048, 2048, &start, cb.callback());
-  EXPECT_EQ(1024, cb.GetResult(rv));
-  EXPECT_EQ(2048, start);
+  result = cb.GetResult(entry->GetAvailableRange(2048, 2048, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(1024, result.available_len);
+  EXPECT_EQ(2048, result.start);
 
   // Query 2K more after that: [3072, 5120). Should get [4096, 4708)
-  rv = entry->GetAvailableRange(3072, 2048, &start, cb.callback());
-  EXPECT_EQ(612, cb.GetResult(rv));
-  EXPECT_EQ(4096, start);
+  result = cb.GetResult(entry->GetAvailableRange(3072, 2048, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(612, result.available_len);
+  EXPECT_EQ(4096, result.start);
 
   // Also double-check that offsets within later children are correctly
   // computed.
   EXPECT_EQ(kSmallSize, WriteSparseData(entry, /* offset = */ 0x200400,
                                         buf_small.get(), kSmallSize));
-  rv = entry->GetAvailableRange(0x100000, 0x200000, &start, cb.callback());
-  EXPECT_EQ(kSmallSize, cb.GetResult(rv));
-  EXPECT_EQ(0x200400, start);
+  result =
+      cb.GetResult(entry->GetAvailableRange(0x100000, 0x200000, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(kSmallSize, result.available_len);
+  EXPECT_EQ(0x200400, result.start);
 
   entry->Close();
 }
@@ -1908,42 +1927,46 @@
   int offset = 1024 - 500;
   int rv = 0;
   net::TestCompletionCallback cb;
-  int64_t start;
+  TestRangeResultCompletionCallback range_cb;
+  RangeResult result;
   for (int i = 0; i < 5; i++) {
     // Check result of last GetAvailableRange.
-    EXPECT_EQ(0, rv);
+    EXPECT_EQ(0, result.available_len);
 
     rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
     EXPECT_EQ(kSize, cb.GetResult(rv));
 
-    rv = entry->GetAvailableRange(offset - 100, kSize, &start, cb.callback());
-    EXPECT_EQ(0, cb.GetResult(rv));
+    result = range_cb.GetResult(
+        entry->GetAvailableRange(offset - 100, kSize, range_cb.callback()));
+    EXPECT_EQ(net::OK, result.net_error);
+    EXPECT_EQ(0, result.available_len);
 
-    rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
-    rv = cb.GetResult(rv);
-    if (!rv) {
+    result = range_cb.GetResult(
+        entry->GetAvailableRange(offset, kSize, range_cb.callback()));
+    if (!result.available_len) {
       rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
       EXPECT_EQ(0, cb.GetResult(rv));
-      rv = 0;
     }
     offset += 1024 * i + 100;
   }
 
   // The last write started 100 bytes below a bundary, so there should be 80
   // bytes after the boundary.
-  EXPECT_EQ(80, rv);
-  EXPECT_EQ(1024 * 7, start);
-  rv = entry->ReadSparseData(start, buf_2.get(), kSize, cb.callback());
+  EXPECT_EQ(80, result.available_len);
+  EXPECT_EQ(1024 * 7, result.start);
+  rv = entry->ReadSparseData(result.start, buf_2.get(), kSize, cb.callback());
   EXPECT_EQ(80, cb.GetResult(rv));
   EXPECT_EQ(0, memcmp(buf_1.get()->data() + 100, buf_2.get()->data(), 80));
 
   // And even that part is dropped when another write changes the offset.
-  offset = start;
+  offset = result.start;
   rv = entry->WriteSparseData(0, buf_1.get(), kSize, cb.callback());
   EXPECT_EQ(kSize, cb.GetResult(rv));
 
-  rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
-  EXPECT_EQ(0, cb.GetResult(rv));
+  result = range_cb.GetResult(
+      entry->GetAvailableRange(offset, kSize, range_cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(0, result.available_len);
   entry->Close();
 }
 
@@ -1963,16 +1986,19 @@
 
   // Any starting offset is fine as long as it is 1024-bytes aligned.
   int rv = 0;
+  RangeResult result;
   net::TestCompletionCallback cb;
-  int64_t start;
+  TestRangeResultCompletionCallback range_cb;
   int64_t offset = 1024 * 11;
   for (; offset < 20000; offset += kSize) {
     rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
     EXPECT_EQ(kSize, cb.GetResult(rv));
 
-    rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
-    EXPECT_EQ(kSize, cb.GetResult(rv));
-    EXPECT_EQ(offset, start);
+    result = range_cb.GetResult(
+        entry->GetAvailableRange(offset, kSize, range_cb.callback()));
+    EXPECT_EQ(net::OK, result.net_error);
+    EXPECT_EQ(kSize, result.available_len);
+    EXPECT_EQ(offset, result.start);
 
     rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
     EXPECT_EQ(kSize, cb.GetResult(rv));
@@ -1985,9 +2011,11 @@
   // Verify again the last write made.
   ASSERT_THAT(OpenEntry(key, &entry), IsOk());
   offset -= kSize;
-  rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
-  EXPECT_EQ(kSize, cb.GetResult(rv));
-  EXPECT_EQ(offset, start);
+  result = range_cb.GetResult(
+      entry->GetAvailableRange(offset, kSize, range_cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(kSize, result.available_len);
+  EXPECT_EQ(offset, result.start);
 
   rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
   EXPECT_EQ(kSize, cb.GetResult(rv));
@@ -2100,40 +2128,46 @@
   EXPECT_EQ(8192, entry->WriteSparseData(50000, buf.get(), 8192,
                                          net::CompletionOnceCallback()));
 
-  int64_t start;
-  net::TestCompletionCallback cb;
+  TestRangeResultCompletionCallback cb;
   // Test that we stop at a discontinuous child at the second block.
-  int rv = entry->GetAvailableRange(0, 10000, &start, cb.callback());
-  EXPECT_EQ(1024, cb.GetResult(rv));
-  EXPECT_EQ(0, start);
+  RangeResult result =
+      cb.GetResult(entry->GetAvailableRange(0, 10000, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(1024, result.available_len);
+  EXPECT_EQ(0, result.start);
 
   // Test that number of bytes is reported correctly when we start from the
   // middle of a filled region.
-  rv = entry->GetAvailableRange(512, 10000, &start, cb.callback());
-  EXPECT_EQ(512, cb.GetResult(rv));
-  EXPECT_EQ(512, start);
+  result = cb.GetResult(entry->GetAvailableRange(512, 10000, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(512, result.available_len);
+  EXPECT_EQ(512, result.start);
 
   // Test that we found bytes in the child of next block.
-  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
-  EXPECT_EQ(1024, cb.GetResult(rv));
-  EXPECT_EQ(5120, start);
+  result = cb.GetResult(entry->GetAvailableRange(1024, 10000, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(1024, result.available_len);
+  EXPECT_EQ(5120, result.start);
 
   // Test that the desired length is respected. It starts within a filled
   // region.
-  rv = entry->GetAvailableRange(5500, 512, &start, cb.callback());
-  EXPECT_EQ(512, cb.GetResult(rv));
-  EXPECT_EQ(5500, start);
+  result = cb.GetResult(entry->GetAvailableRange(5500, 512, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(512, result.available_len);
+  EXPECT_EQ(5500, result.start);
 
   // Test that the desired length is respected. It starts before a filled
   // region.
-  rv = entry->GetAvailableRange(5000, 620, &start, cb.callback());
-  EXPECT_EQ(500, cb.GetResult(rv));
-  EXPECT_EQ(5120, start);
+  result = cb.GetResult(entry->GetAvailableRange(5000, 620, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(500, result.available_len);
+  EXPECT_EQ(5120, result.start);
 
   // Test that multiple blocks are scanned.
-  rv = entry->GetAvailableRange(40000, 20000, &start, cb.callback());
-  EXPECT_EQ(8192, cb.GetResult(rv));
-  EXPECT_EQ(50000, start);
+  result = cb.GetResult(entry->GetAvailableRange(40000, 20000, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(8192, result.available_len);
+  EXPECT_EQ(50000, result.start);
 
   entry->Close();
 }
@@ -2338,53 +2372,64 @@
   EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
   EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));
 
-  int rv;
-  int64_t start;
-  net::TestCompletionCallback cb;
+  TestRangeResultCompletionCallback cb;
+  RangeResult result;
   if (memory_only_ || simple_cache_mode_) {
-    rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
-    EXPECT_EQ(100, cb.GetResult(rv));
-    EXPECT_EQ(500, start);
+    result = cb.GetResult(entry->GetAvailableRange(0, 600, cb.callback()));
+    EXPECT_EQ(net::OK, result.net_error);
+    EXPECT_EQ(100, result.available_len);
+    EXPECT_EQ(500, result.start);
   } else {
-    rv = entry->GetAvailableRange(0, 2048, &start, cb.callback());
-    EXPECT_EQ(1024, cb.GetResult(rv));
-    EXPECT_EQ(1024, start);
+    result = cb.GetResult(entry->GetAvailableRange(0, 2048, cb.callback()));
+    EXPECT_EQ(net::OK, result.net_error);
+    EXPECT_EQ(1024, result.available_len);
+    EXPECT_EQ(1024, result.start);
   }
-  rv = entry->GetAvailableRange(kSize, kSize, &start, cb.callback());
-  EXPECT_EQ(500, cb.GetResult(rv));
-  EXPECT_EQ(kSize, start);
-  rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback());
+  result = cb.GetResult(entry->GetAvailableRange(kSize, kSize, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(500, result.available_len);
+  EXPECT_EQ(kSize, result.start);
+  result =
+      cb.GetResult(entry->GetAvailableRange(20 * 1024, 10000, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
   if (memory_only_ || simple_cache_mode_)
-    EXPECT_EQ(3616, cb.GetResult(rv));
+    EXPECT_EQ(3616, result.available_len);
   else
-    EXPECT_EQ(3072, cb.GetResult(rv));
+    EXPECT_EQ(3072, result.available_len);
 
-  EXPECT_EQ(20 * 1024, start);
+  EXPECT_EQ(20 * 1024, result.start);
 
   // 1. Query before a filled 1KB block.
   // 2. Query within a filled 1KB block.
   // 3. Query beyond a filled 1KB block.
   if (memory_only_ || simple_cache_mode_) {
-    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
-    EXPECT_EQ(3496, cb.GetResult(rv));
-    EXPECT_EQ(20000, start);
+    result =
+        cb.GetResult(entry->GetAvailableRange(19400, kSize, cb.callback()));
+    EXPECT_EQ(net::OK, result.net_error);
+    EXPECT_EQ(3496, result.available_len);
+    EXPECT_EQ(20000, result.start);
   } else {
-    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
-    EXPECT_EQ(3016, cb.GetResult(rv));
-    EXPECT_EQ(20480, start);
+    result =
+        cb.GetResult(entry->GetAvailableRange(19400, kSize, cb.callback()));
+    EXPECT_EQ(net::OK, result.net_error);
+    EXPECT_EQ(3016, result.available_len);
+    EXPECT_EQ(20480, result.start);
   }
-  rv = entry->GetAvailableRange(3073, kSize, &start, cb.callback());
-  EXPECT_EQ(1523, cb.GetResult(rv));
-  EXPECT_EQ(3073, start);
-  rv = entry->GetAvailableRange(4600, kSize, &start, cb.callback());
-  EXPECT_EQ(0, cb.GetResult(rv));
-  EXPECT_EQ(4600, start);
+  result = cb.GetResult(entry->GetAvailableRange(3073, kSize, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(1523, result.available_len);
+  EXPECT_EQ(3073, result.start);
+  result = cb.GetResult(entry->GetAvailableRange(4600, kSize, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(0, result.available_len);
+  EXPECT_EQ(4600, result.start);
 
   // Now make another write and verify that there is no hole in between.
   EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
-  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
-  EXPECT_EQ(7 * 1024 + 500, cb.GetResult(rv));
-  EXPECT_EQ(1024, start);
+  result = cb.GetResult(entry->GetAvailableRange(1024, 10000, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(7 * 1024 + 500, result.available_len);
+  EXPECT_EQ(1024, result.start);
   EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
   EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
   EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));
@@ -2483,17 +2528,17 @@
     EXPECT_EQ(0, memcmp(buf->data(), read_buf->data(), kSize));
   }
 
-  int64_t out_start = 0;
-  net::TestCompletionCallback cb;
-  rv = entry->GetAvailableRange(kOffset - kSize, kSize * 3, &out_start,
-                                cb.callback());
-  rv = cb.GetResult(rv);
+  TestRangeResultCompletionCallback cb;
+  RangeResult result = cb.GetResult(
+      entry->GetAvailableRange(kOffset - kSize, kSize * 3, cb.callback()));
   if (expect_unsupported) {
     // GetAvailableRange just returns nothing found, not an error.
-    EXPECT_EQ(rv, 0);
+    EXPECT_EQ(net::OK, result.net_error);
+    EXPECT_EQ(result.available_len, 0);
   } else {
-    EXPECT_EQ(kSize, rv);
-    EXPECT_EQ(kOffset, out_start);
+    EXPECT_EQ(net::OK, result.net_error);
+    EXPECT_EQ(kSize, result.available_len);
+    EXPECT_EQ(kOffset, result.start);
   }
 
   entry->Close();
@@ -2534,11 +2579,11 @@
   EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);
 
   // GetAvailableRange just returns nothing.
-  net::TestCompletionCallback cb;
-  int64_t out_start = 0;
-  rv = entry->GetAvailableRange(kLimit, kSize * 3, &out_start, cb.callback());
-  rv = cb.GetResult(rv);
-  EXPECT_EQ(rv, 0);
+  TestRangeResultCompletionCallback cb;
+  RangeResult result =
+      cb.GetResult(entry->GetAvailableRange(kLimit, kSize * 3, cb.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(0, result.available_len);
   entry->Close();
 }
 
@@ -2620,14 +2665,14 @@
   CacheTestFillBuffer(buf->data(), kSize, false);
 
   // This will open and write two "real" entries.
-  net::TestCompletionCallback cb1, cb2, cb3, cb4, cb5;
+  net::TestCompletionCallback cb1, cb2, cb3, cb4;
   int rv = entry->WriteSparseData(
       1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
   EXPECT_THAT(rv, IsError(net::ERR_IO_PENDING));
 
-  int64_t offset = 0;
-  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
-  rv = cb5.GetResult(rv);
+  TestRangeResultCompletionCallback cb5;
+  RangeResult result =
+      cb5.GetResult(entry->GetAvailableRange(0, kSize, cb5.callback()));
   if (!cb1.have_result()) {
     // We may or may not have finished writing to the entry. If we have not,
     // we cannot start another operation at this time.
@@ -2646,10 +2691,10 @@
 
   if (!cb1.have_result()) {
     EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
-              entry->ReadSparseData(offset, buf.get(), kSize,
+              entry->ReadSparseData(result.start, buf.get(), kSize,
                                     net::CompletionOnceCallback()));
     EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
-              entry->WriteSparseData(offset, buf.get(), kSize,
+              entry->WriteSparseData(result.start, buf.get(), kSize,
                                      net::CompletionOnceCallback()));
   }
 
@@ -2661,8 +2706,10 @@
   EXPECT_THAT(cb3.WaitForResult(), IsOk());
   EXPECT_THAT(cb4.WaitForResult(), IsOk());
 
-  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
-  EXPECT_EQ(0, cb5.GetResult(rv));
+  result = cb5.GetResult(
+      entry->GetAvailableRange(result.start, kSize, cb5.callback()));
+  EXPECT_EQ(net::OK, result.net_error);
+  EXPECT_EQ(0, result.available_len);
   entry->Close();
 }
 
@@ -5041,9 +5088,11 @@
   // Similarly for other ops.
   EXPECT_EQ(net::ERR_FAILED, WriteSparseData(entry, 0, buffer.get(), kSize));
   net::TestCompletionCallback cb;
-  int64_t start;
-  int rv = entry->GetAvailableRange(0, 1024, &start, cb.callback());
-  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
+
+  TestRangeResultCompletionCallback range_cb;
+  RangeResult result = range_cb.GetResult(
+      entry->GetAvailableRange(0, 1024, range_cb.callback()));
+  EXPECT_EQ(net::ERR_FAILED, result.net_error);
 
   entry->Close();
   disk_cache::FlushCacheThreadForTesting();
diff --git a/net/disk_cache/memory/mem_entry_impl.cc b/net/disk_cache/memory/mem_entry_impl.cc
index 3a5eac7..3e17d53 100644
--- a/net/disk_cache/memory/mem_entry_impl.cc
+++ b/net/disk_cache/memory/mem_entry_impl.cc
@@ -252,18 +252,17 @@
   return result;
 }
 
-int MemEntryImpl::GetAvailableRange(int64_t offset,
-                                    int len,
-                                    int64_t* start,
-                                    CompletionOnceCallback callback) {
+RangeResult MemEntryImpl::GetAvailableRange(int64_t offset,
+                                            int len,
+                                            RangeResultCallback callback) {
   if (net_log_.IsCapturing()) {
     NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_GET_RANGE,
                           net::NetLogEventPhase::BEGIN, offset, len);
   }
-  int result = InternalGetAvailableRange(offset, len, start);
+  RangeResult result = InternalGetAvailableRange(offset, len);
   if (net_log_.IsCapturing()) {
     net_log_.EndEvent(net::NetLogEventType::SPARSE_GET_RANGE, [&] {
-      return CreateNetLogGetAvailableRangeResultParams(*start, result);
+      return CreateNetLogGetAvailableRangeResultParams(result);
     });
   }
   return result;
@@ -541,17 +540,14 @@
   return io_buf->BytesConsumed();
 }
 
-int MemEntryImpl::InternalGetAvailableRange(int64_t offset,
-                                            int len,
-                                            int64_t* start) {
+RangeResult MemEntryImpl::InternalGetAvailableRange(int64_t offset, int len) {
   DCHECK_EQ(PARENT_ENTRY, type());
-  DCHECK(start);
 
   if (!InitSparseInfo())
-    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+    return RangeResult(net::ERR_CACHE_OPERATION_NOT_SUPPORTED);
 
-  if (offset < 0 || len < 0 || !start)
-    return net::ERR_INVALID_ARGUMENT;
+  if (offset < 0 || len < 0)
+    return RangeResult(net::ERR_INVALID_ARGUMENT);
 
   // Truncate |len| to make sure that |offset + len| does not overflow.
   // This is OK since one can't write that far anyway.
@@ -584,12 +580,11 @@
 
       found.SpanningUnion(relevant_in_next_child);
     }
-    *start = found.min();
-    return found.Length();
+
+    return RangeResult(found.min(), found.Length());
   }
 
-  *start = offset;
-  return 0;
+  return RangeResult(offset, 0);
 }
 
 bool MemEntryImpl::InitSparseInfo() {
diff --git a/net/disk_cache/memory/mem_entry_impl.h b/net/disk_cache/memory/mem_entry_impl.h
index be4934c..9b71e03a 100644
--- a/net/disk_cache/memory/mem_entry_impl.h
+++ b/net/disk_cache/memory/mem_entry_impl.h
@@ -126,10 +126,9 @@
                       IOBuffer* buf,
                       int buf_len,
                       CompletionOnceCallback callback) override;
-  int GetAvailableRange(int64_t offset,
-                        int len,
-                        int64_t* start,
-                        CompletionOnceCallback callback) override;
+  RangeResult GetAvailableRange(int64_t offset,
+                                int len,
+                                RangeResultCallback callback) override;
   bool CouldBeSparse() const override;
   void CancelSparseIO() override {}
   net::Error ReadyForSparseIO(CompletionOnceCallback callback) override;
@@ -156,7 +155,7 @@
                         bool truncate);
   int InternalReadSparseData(int64_t offset, IOBuffer* buf, int buf_len);
   int InternalWriteSparseData(int64_t offset, IOBuffer* buf, int buf_len);
-  int InternalGetAvailableRange(int64_t offset, int len, int64_t* start);
+  RangeResult InternalGetAvailableRange(int64_t offset, int len);
 
   // Initializes the children map and sparse info. This method is only called
   // on a parent entry.
diff --git a/net/disk_cache/net_log_parameters.cc b/net/disk_cache/net_log_parameters.cc
index 67afd8b..89c1245a 100644
--- a/net/disk_cache/net_log_parameters.cc
+++ b/net/disk_cache/net_log_parameters.cc
@@ -107,14 +107,14 @@
   });
 }
 
-base::Value CreateNetLogGetAvailableRangeResultParams(int64_t start,
-                                                      int result) {
+base::Value CreateNetLogGetAvailableRangeResultParams(
+    disk_cache::RangeResult result) {
   base::Value dict(base::Value::Type::DICTIONARY);
-  if (result > 0) {
-    dict.SetIntKey("length", result);
-    dict.SetKey("start", net::NetLogNumberValue(start));
+  if (result.net_error == net::OK) {
+    dict.SetIntKey("length", result.available_len);
+    dict.SetKey("start", net::NetLogNumberValue(result.start));
   } else {
-    dict.SetIntKey("net_error", result);
+    dict.SetIntKey("net_error", result.net_error);
   }
   return dict;
 }
diff --git a/net/disk_cache/net_log_parameters.h b/net/disk_cache/net_log_parameters.h
index bbfa446..605b7f4 100644
--- a/net/disk_cache/net_log_parameters.h
+++ b/net/disk_cache/net_log_parameters.h
@@ -7,6 +7,7 @@
 
 #include <stdint.h>
 
+#include "net/disk_cache/disk_cache.h"
 #include "net/log/net_log_with_source.h"
 
 namespace net {
@@ -63,8 +64,8 @@
                            int child_len);
 
 // Creates NetLog parameters for when a call to GetAvailableRange returns.
-base::Value CreateNetLogGetAvailableRangeResultParams(int64_t start,
-                                                      int result);
+base::Value CreateNetLogGetAvailableRangeResultParams(
+    const disk_cache::RangeResult result);
 
 }  // namespace disk_cache
 
diff --git a/net/disk_cache/simple/simple_entry_impl.cc b/net/disk_cache/simple/simple_entry_impl.cc
index 892d10e..f3ef30d0 100644
--- a/net/disk_cache/simple/simple_entry_impl.cc
+++ b/net/disk_cache/simple/simple_entry_impl.cc
@@ -566,13 +566,12 @@
   return net::ERR_IO_PENDING;
 }
 
-int SimpleEntryImpl::GetAvailableRange(int64_t offset,
-                                       int len,
-                                       int64_t* start,
-                                       CompletionOnceCallback callback) {
+RangeResult SimpleEntryImpl::GetAvailableRange(int64_t offset,
+                                               int len,
+                                               RangeResultCallback callback) {
   DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
   if (offset < 0 || len < 0)
-    return net::ERR_INVALID_ARGUMENT;
+    return RangeResult(net::ERR_INVALID_ARGUMENT);
 
   // Truncate |len| to make sure that |offset + len| does not overflow.
   // This is OK since one can't write that far anyway.
@@ -582,8 +581,8 @@
 
   ScopedOperationRunner operation_runner(this);
   pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
-      this, offset, len, start, std::move(callback)));
-  return net::ERR_IO_PENDING;
+      this, offset, len, std::move(callback)));
+  return RangeResult(net::ERR_IO_PENDING);
 }
 
 bool SimpleEntryImpl::CouldBeSparse() const {
@@ -766,8 +765,7 @@
         break;
       case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
         GetAvailableRangeInternal(operation.sparse_offset(), operation.length(),
-                                  operation.out_start(),
-                                  operation.ReleaseCallback());
+                                  operation.ReleaseRangeResultCalback());
         break;
       case SimpleEntryOperation::TYPE_DOOM:
         DoomEntryInternal(operation.ReleaseCallback());
@@ -1308,18 +1306,17 @@
                                              std::move(reply), entry_priority_);
 }
 
-void SimpleEntryImpl::GetAvailableRangeInternal(
-    int64_t sparse_offset,
-    int len,
-    int64_t* out_start,
-    net::CompletionOnceCallback callback) {
+void SimpleEntryImpl::GetAvailableRangeInternal(int64_t sparse_offset,
+                                                int len,
+                                                RangeResultCallback callback) {
   DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
   ScopedOperationRunner operation_runner(this);
 
   if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
     if (!callback.is_null()) {
       base::SequencedTaskRunnerHandle::Get()->PostTask(
-          FROM_HERE, base::BindOnce(std::move(callback), net::ERR_FAILED));
+          FROM_HERE,
+          base::BindOnce(std::move(callback), RangeResult(net::ERR_FAILED)));
     }
     // |this| may be destroyed after return here.
     return;
@@ -1328,12 +1325,11 @@
   DCHECK_EQ(STATE_READY, state_);
   state_ = STATE_IO_PENDING;
 
-  std::unique_ptr<int> result(new int());
-  OnceClosure task =
-      base::BindOnce(&SimpleSynchronousEntry::GetAvailableRange,
-                     base::Unretained(synchronous_entry_),
-                     SimpleSynchronousEntry::SparseRequest(sparse_offset, len),
-                     out_start, result.get());
+  auto result = std::make_unique<RangeResult>();
+  OnceClosure task = base::BindOnce(
+      &SimpleSynchronousEntry::GetAvailableRange,
+      base::Unretained(synchronous_entry_),
+      SimpleSynchronousEntry::SparseRequest(sparse_offset, len), result.get());
   OnceClosure reply =
       base::BindOnce(&SimpleEntryImpl::GetAvailableRangeOperationComplete, this,
                      std::move(callback), std::move(result));
@@ -1494,8 +1490,7 @@
   }
 }
 
-void SimpleEntryImpl::EntryOperationComplete(
-    net::CompletionOnceCallback completion_callback,
+void SimpleEntryImpl::UpdateStateAfterOperationComplete(
     const SimpleEntryStat& entry_stat,
     int result) {
   DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
@@ -1508,7 +1503,13 @@
     state_ = STATE_READY;
     UpdateDataFromEntryStat(entry_stat);
   }
+}
 
+void SimpleEntryImpl::EntryOperationComplete(
+    net::CompletionOnceCallback completion_callback,
+    const SimpleEntryStat& entry_stat,
+    int result) {
+  UpdateStateAfterOperationComplete(entry_stat, result);
   if (!completion_callback.is_null()) {
     base::SequencedTaskRunnerHandle::Get()->PostTask(
         FROM_HERE, base::BindOnce(std::move(completion_callback), result));
@@ -1623,15 +1624,20 @@
 }
 
 void SimpleEntryImpl::GetAvailableRangeOperationComplete(
-    net::CompletionOnceCallback completion_callback,
-    std::unique_ptr<int> result) {
+    RangeResultCallback completion_callback,
+    std::unique_ptr<RangeResult> result) {
   DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
   DCHECK(synchronous_entry_);
   DCHECK(result);
 
   SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                              sparse_data_size_);
-  EntryOperationComplete(std::move(completion_callback), entry_stat, *result);
+  UpdateStateAfterOperationComplete(entry_stat, result->net_error);
+  if (!completion_callback.is_null()) {
+    base::SequencedTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE, base::BindOnce(std::move(completion_callback), *result));
+  }
+  RunNextOperationIfNeeded();
 }
 
 void SimpleEntryImpl::DoomOperationComplete(
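
With the hunks above, the immediate RangeResult return value tells the caller
whether the answer is already available, and the RangeResultCallback only
fires when that immediate result is ERR_IO_PENDING. A minimal caller sketch
under those assumptions (the function name, offsets, and logging below are
illustrative, not taken from this change):

#include "base/bind.h"
#include "base/logging.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/disk_cache.h"

// Illustrative caller; |entry| is assumed to be an open sparse entry owned
// elsewhere, and the offset/length are arbitrary.
void LogFirstAvailableRange(disk_cache::Entry* entry) {
  disk_cache::RangeResult result = entry->GetAvailableRange(
      /*offset=*/0, /*len=*/4096,
      base::BindOnce([](const disk_cache::RangeResult& async_result) {
        // Runs only when GetAvailableRange() returned ERR_IO_PENDING.
        if (async_result.net_error == net::OK) {
          LOG(INFO) << "range: " << async_result.start << " + "
                    << async_result.available_len;
        }
      }));
  if (result.net_error == net::ERR_IO_PENDING)
    return;  // The answer arrives through the callback above.
  if (result.net_error == net::OK) {
    LOG(INFO) << "range: " << result.start << " + " << result.available_len;
  } else {
    LOG(ERROR) << "GetAvailableRange failed: " << result.net_error;
  }
}
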
diff --git a/net/disk_cache/simple/simple_entry_impl.h b/net/disk_cache/simple/simple_entry_impl.h
index 2d66478..a63952d4 100644
--- a/net/disk_cache/simple/simple_entry_impl.h
+++ b/net/disk_cache/simple/simple_entry_impl.h
@@ -134,10 +134,9 @@
                       net::IOBuffer* buf,
                       int buf_len,
                       CompletionOnceCallback callback) override;
-  int GetAvailableRange(int64_t offset,
-                        int len,
-                        int64_t* start,
-                        CompletionOnceCallback callback) override;
+  RangeResult GetAvailableRange(int64_t offset,
+                                int len,
+                                RangeResultCallback callback) override;
   bool CouldBeSparse() const override;
   void CancelSparseIO() override;
   net::Error ReadyForSparseIO(CompletionOnceCallback callback) override;
@@ -272,8 +271,7 @@
 
   void GetAvailableRangeInternal(int64_t sparse_offset,
                                  int len,
-                                 int64_t* out_start,
-                                 CompletionOnceCallback callback);
+                                 RangeResultCallback callback);
 
   void DoomEntryInternal(CompletionOnceCallback callback);
 
@@ -294,6 +292,11 @@
   void CloseOperationComplete(
       std::unique_ptr<SimpleEntryCloseResults> in_results);
 
+  // Internal utility method used by other completion methods.
+  // Updates state and dooms the entry on errors.
+  void UpdateStateAfterOperationComplete(const SimpleEntryStat& entry_stat,
+                                         int result);
+
   // Internal utility method used by other completion methods. Calls
   // |completion_callback| after updating state and dooming on errors.
   void EntryOperationComplete(CompletionOnceCallback completion_callback,
@@ -328,8 +331,8 @@
                                     std::unique_ptr<int> result);
 
   void GetAvailableRangeOperationComplete(
-      CompletionOnceCallback completion_callback,
-      std::unique_ptr<int> result);
+      RangeResultCallback completion_callback,
+      std::unique_ptr<RangeResult> result);
 
   // Called after an asynchronous doom completes.
   void DoomOperationComplete(CompletionOnceCallback callback,
diff --git a/net/disk_cache/simple/simple_entry_operation.cc b/net/disk_cache/simple/simple_entry_operation.cc
index ebe9cad..b58f573 100644
--- a/net/disk_cache/simple/simple_entry_operation.cc
+++ b/net/disk_cache/simple/simple_entry_operation.cc
@@ -23,7 +23,7 @@
     EntryResultState result_state,
     EntryResultCallback callback) {
   SimpleEntryOperation op(entry, nullptr, CompletionOnceCallback(), 0, 0, 0,
-                          nullptr, TYPE_OPEN, INDEX_NOEXIST, 0, false, false);
+                          TYPE_OPEN, INDEX_NOEXIST, 0, false, false);
   op.entry_callback_ = std::move(callback);
   op.entry_result_state_ = result_state;
   return op;
@@ -35,7 +35,7 @@
     EntryResultState result_state,
     EntryResultCallback callback) {
   SimpleEntryOperation op(entry, nullptr, CompletionOnceCallback(), 0, 0, 0,
-                          nullptr, TYPE_CREATE, INDEX_NOEXIST, 0, false, false);
+                          TYPE_CREATE, INDEX_NOEXIST, 0, false, false);
   op.entry_callback_ = std::move(callback);
   op.entry_result_state_ = result_state;
   return op;
@@ -48,8 +48,7 @@
     EntryResultState result_state,
     EntryResultCallback callback) {
   SimpleEntryOperation op(entry, nullptr, CompletionOnceCallback(), 0, 0, 0,
-                          nullptr, TYPE_OPEN_OR_CREATE, index_state, 0, false,
-                          false);
+                          TYPE_OPEN_OR_CREATE, index_state, 0, false, false);
   op.entry_callback_ = std::move(callback);
   op.entry_result_state_ = result_state;
   return op;
@@ -59,8 +58,7 @@
 SimpleEntryOperation SimpleEntryOperation::CloseOperation(
     SimpleEntryImpl* entry) {
   return SimpleEntryOperation(entry, nullptr, CompletionOnceCallback(), 0, 0, 0,
-                              nullptr, TYPE_CLOSE, INDEX_NOEXIST, 0, false,
-                              false);
+                              TYPE_CLOSE, INDEX_NOEXIST, 0, false, false);
 }
 
 // static
@@ -72,8 +70,8 @@
     net::IOBuffer* buf,
     CompletionOnceCallback callback) {
   return SimpleEntryOperation(entry, buf, std::move(callback), offset, 0,
-                              length, nullptr, TYPE_READ, INDEX_NOEXIST, index,
-                              false, false);
+                              length, TYPE_READ, INDEX_NOEXIST, index, false,
+                              false);
 }
 
 // static
@@ -87,7 +85,7 @@
     bool optimistic,
     CompletionOnceCallback callback) {
   return SimpleEntryOperation(entry, buf, std::move(callback), offset, 0,
-                              length, nullptr, TYPE_WRITE, INDEX_NOEXIST, index,
+                              length, TYPE_WRITE, INDEX_NOEXIST, index,
                               truncate, optimistic);
 }
 
@@ -99,8 +97,8 @@
     net::IOBuffer* buf,
     CompletionOnceCallback callback) {
   return SimpleEntryOperation(entry, buf, std::move(callback), 0, sparse_offset,
-                              length, nullptr, TYPE_READ_SPARSE, INDEX_NOEXIST,
-                              0, false, false);
+                              length, TYPE_READ_SPARSE, INDEX_NOEXIST, 0, false,
+                              false);
 }
 
 // static
@@ -111,8 +109,8 @@
     net::IOBuffer* buf,
     CompletionOnceCallback callback) {
   return SimpleEntryOperation(entry, buf, std::move(callback), 0, sparse_offset,
-                              length, nullptr, TYPE_WRITE_SPARSE, INDEX_NOEXIST,
-                              0, false, false);
+                              length, TYPE_WRITE_SPARSE, INDEX_NOEXIST, 0,
+                              false, false);
 }
 
 // static
@@ -120,11 +118,12 @@
     SimpleEntryImpl* entry,
     int64_t sparse_offset,
     int length,
-    int64_t* out_start,
-    CompletionOnceCallback callback) {
-  return SimpleEntryOperation(
-      entry, nullptr, std::move(callback), 0, sparse_offset, length, out_start,
-      TYPE_GET_AVAILABLE_RANGE, INDEX_NOEXIST, 0, false, false);
+    RangeResultCallback callback) {
+  SimpleEntryOperation op(entry, nullptr, CompletionOnceCallback(), 0,
+                          sparse_offset, length, TYPE_GET_AVAILABLE_RANGE,
+                          INDEX_NOEXIST, 0, false, false);
+  op.range_callback_ = std::move(callback);
+  return op;
 }
 
 // static
@@ -135,14 +134,13 @@
   const int offset = 0;
   const int64_t sparse_offset = 0;
   const int length = 0;
-  int64_t* const out_start = nullptr;
   const OpenEntryIndexEnum index_state = INDEX_NOEXIST;
   const int index = 0;
   const bool truncate = false;
   const bool optimistic = false;
   return SimpleEntryOperation(entry, buf, std::move(callback), offset,
-                              sparse_offset, length, out_start, TYPE_DOOM,
-                              index_state, index, truncate, optimistic);
+                              sparse_offset, length, TYPE_DOOM, index_state,
+                              index, truncate, optimistic);
 }
 
 SimpleEntryOperation::SimpleEntryOperation(SimpleEntryImpl* entry,
@@ -151,7 +149,6 @@
                                            int offset,
                                            int64_t sparse_offset,
                                            int length,
-                                           int64_t* out_start,
                                            EntryOperationType type,
                                            OpenEntryIndexEnum index_state,
                                            int index,
@@ -163,7 +160,6 @@
       offset_(offset),
       sparse_offset_(sparse_offset),
       length_(length),
-      out_start_(out_start),
       type_(type),
       index_state_(index_state),
       index_(index),
diff --git a/net/disk_cache/simple/simple_entry_operation.h b/net/disk_cache/simple/simple_entry_operation.h
index c598944..a7f4d52 100644
--- a/net/disk_cache/simple/simple_entry_operation.h
+++ b/net/disk_cache/simple/simple_entry_operation.h
@@ -92,8 +92,7 @@
       SimpleEntryImpl* entry,
       int64_t sparse_offset,
       int length,
-      int64_t* out_start,
-      CompletionOnceCallback callback);
+      RangeResultCallback callback);
   static SimpleEntryOperation DoomOperation(SimpleEntryImpl* entry,
                                             CompletionOnceCallback callback);
 
@@ -104,6 +103,9 @@
   EntryResultCallback ReleaseEntryResultCallback() {
     return std::move(entry_callback_);
   }
+  RangeResultCallback ReleaseRangeResultCallback() {
+    return std::move(range_callback_);
+  }
 
   EntryResultState entry_result_state() { return entry_result_state_; }
 
@@ -112,7 +114,6 @@
   int offset() const { return offset_; }
   int64_t sparse_offset() const { return sparse_offset_; }
   int length() const { return length_; }
-  int64_t* out_start() { return out_start_; }
   net::IOBuffer* buf() { return buf_.get(); }
   bool truncate() const { return truncate_; }
   bool optimistic() const { return optimistic_; }
@@ -124,7 +125,6 @@
                        int offset,
                        int64_t sparse_offset,
                        int length,
-                       int64_t* out_start,
                        EntryOperationType type,
                        OpenEntryIndexEnum index_state,
                        int index,
@@ -146,7 +146,7 @@
   const int length_;
 
   // Used in get available range operations.
-  int64_t* const out_start_;
+  RangeResultCallback range_callback_;
 
   const EntryOperationType type_;
   // Used in the "open or create" operation.
diff --git a/net/disk_cache/simple/simple_synchronous_entry.cc b/net/disk_cache/simple/simple_synchronous_entry.cc
index e901b32..2866764 100644
--- a/net/disk_cache/simple/simple_synchronous_entry.cc
+++ b/net/disk_cache/simple/simple_synchronous_entry.cc
@@ -875,8 +875,7 @@
 }
 
 void SimpleSynchronousEntry::GetAvailableRange(const SparseRequest& in_entry_op,
-                                               int64_t* out_start,
-                                               int* out_result) {
+                                               RangeResult* out_result) {
   DCHECK(initialized_);
   int64_t offset = in_entry_op.sparse_offset;
   int len = in_entry_op.buf_len;
@@ -907,8 +906,8 @@
   }
 
   int64_t len_from_start = len - (start - offset);
-  *out_start = start;
-  *out_result = static_cast<int>(std::min(avail_so_far, len_from_start));
+  *out_result = RangeResult(
+      start, static_cast<int>(std::min(avail_so_far, len_from_start)));
 }
 
 int SimpleSynchronousEntry::CheckEOFRecord(base::File* file,
diff --git a/net/disk_cache/simple/simple_synchronous_entry.h b/net/disk_cache/simple/simple_synchronous_entry.h
index 9e1536b..ae61f04c 100644
--- a/net/disk_cache/simple/simple_synchronous_entry.h
+++ b/net/disk_cache/simple/simple_synchronous_entry.h
@@ -46,6 +46,7 @@
 NET_EXPORT_PRIVATE int GetSimpleCachePrefetchSize();
 
 class SimpleSynchronousEntry;
+struct RangeResult;
 
 // This class handles the passing of data about the entry between
 // SimpleEntryImplementation and SimpleSynchronousEntry and the computation of
@@ -269,8 +270,7 @@
                        SimpleEntryStat* out_entry_stat,
                        int* out_result);
   void GetAvailableRange(const SparseRequest& in_entry_op,
-                         int64_t* out_start,
-                         int* out_result);
+                         RangeResult* out_result);
 
   // Close all streams, and add write EOF records to streams indicated by the
   // CRCRecord entries in |crc32s_to_write|.
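
The forward declaration added above is all this header needs; the full
definition is reachable through net/disk_cache/disk_cache.h (which
partial_data.h below starts including for the same reason) and is not part of
this excerpt. Reconstructed from the constructors and fields exercised in this
patch, it presumably looks roughly like the sketch below; the default member
values in particular are guesses.

#include <stdint.h>

#include "base/callback.h"
#include "net/base/net_errors.h"

namespace disk_cache {

// Reconstructed sketch only; the authoritative definition is not in this diff.
struct RangeResult {
  RangeResult() = default;
  explicit RangeResult(net::Error error) : net_error(error) {}
  RangeResult(int64_t start, int available_len)
      : net_error(net::OK), start(start), available_len(available_len) {}

  // ERR_IO_PENDING here means the RangeResultCallback passed to
  // GetAvailableRange() will deliver the real result later.
  net::Error net_error = net::ERR_FAILED;  // default is a guess

  // Valid only when |net_error| is net::OK.
  int64_t start = -1;  // default is a guess
  int available_len = 0;
};

using RangeResultCallback = base::OnceCallback<void(const RangeResult&)>;

}  // namespace disk_cache
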
diff --git a/net/http/mock_http_cache.cc b/net/http/mock_http_cache.cc
index e6ea9bce..728a36d 100644
--- a/net/http/mock_http_cache.cc
+++ b/net/http/mock_http_cache.cc
@@ -72,8 +72,7 @@
 
 struct MockDiskEntry::CallbackInfo {
   scoped_refptr<MockDiskEntry> entry;
-  net::CompletionOnceCallback callback;
-  int result;
+  base::OnceClosure callback;
 };
 
 MockDiskEntry::MockDiskEntry(const std::string& key)
@@ -268,44 +267,47 @@
   return ERR_IO_PENDING;
 }
 
-int MockDiskEntry::GetAvailableRange(int64_t offset,
-                                     int len,
-                                     int64_t* start,
-                                     CompletionOnceCallback callback) {
+disk_cache::RangeResult MockDiskEntry::GetAvailableRange(
+    int64_t offset,
+    int len,
+    RangeResultCallback callback) {
   DCHECK(!callback.is_null());
   if (!sparse_ || busy_ || cancel_)
-    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
+    return RangeResult(ERR_CACHE_OPERATION_NOT_SUPPORTED);
   if (offset < 0)
-    return ERR_FAILED;
+    return RangeResult(ERR_FAILED);
 
   if (fail_requests_ & FAIL_GET_AVAILABLE_RANGE)
-    return ERR_CACHE_READ_FAILURE;
+    return RangeResult(ERR_CACHE_READ_FAILURE);
 
-  *start = offset;
+  RangeResult result;
+  result.net_error = OK;
+  result.start = offset;
+  result.available_len = 0;
   DCHECK(offset < std::numeric_limits<int32_t>::max());
   int real_offset = static_cast<int>(offset);
   if (static_cast<int>(data_[1].size()) < real_offset)
-    return 0;
+    return result;
 
   int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
-  int count = 0;
   for (; num > 0; num--, real_offset++) {
-    if (!count) {
+    if (!result.available_len) {
       if (data_[1][real_offset]) {
-        count++;
-        *start = real_offset;
+        result.available_len++;
+        result.start = real_offset;
       }
     } else {
       if (!data_[1][real_offset])
         break;
-      count++;
+      result.available_len++;
     }
   }
-  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
-    return count;
+  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) {
+    return result;
+  }
 
-  CallbackLater(std::move(callback), count);
-  return ERR_IO_PENDING;
+  CallbackLater(base::BindOnce(std::move(callback), result));
+  return RangeResult(ERR_IO_PENDING);
 }
 
 bool MockDiskEntry::CouldBeSparse() const {
@@ -348,7 +350,7 @@
     return;
   ignore_callbacks_ = value;
   if (!value)
-    StoreAndDeliverCallbacks(false, nullptr, CompletionOnceCallback(), 0);
+    StoreAndDeliverCallbacks(false, nullptr, base::OnceClosure());
 }
 
 MockDiskEntry::~MockDiskEntry() = default;
@@ -356,15 +358,19 @@
 // Unlike the callbacks for MockHttpTransaction, we want this one to run even
 // if the consumer called Close on the MockDiskEntry.  We achieve that by
 // leveraging the fact that this class is reference counted.
-void MockDiskEntry::CallbackLater(CompletionOnceCallback callback, int result) {
+void MockDiskEntry::CallbackLater(base::OnceClosure callback) {
   if (ignore_callbacks_)
-    return StoreAndDeliverCallbacks(true, this, std::move(callback), result);
+    return StoreAndDeliverCallbacks(true, this, std::move(callback));
   base::ThreadTaskRunnerHandle::Get()->PostTask(
-      FROM_HERE, base::BindOnce(&MockDiskEntry::RunCallback, this,
-                                std::move(callback), result));
+      FROM_HERE,
+      base::BindOnce(&MockDiskEntry::RunCallback, this, std::move(callback)));
 }
 
-void MockDiskEntry::RunCallback(CompletionOnceCallback callback, int result) {
+void MockDiskEntry::CallbackLater(CompletionOnceCallback callback, int result) {
+  CallbackLater(base::BindOnce(std::move(callback), result));
+}
+
+void MockDiskEntry::RunCallback(base::OnceClosure callback) {
   if (busy_) {
     // This is kind of hacky, but controlling the behavior of just this entry
     // from a test is sort of complicated.  What we really want to do is
@@ -376,11 +382,11 @@
     // trips through the message loop instead of one).
     if (!delayed_) {
       delayed_ = true;
-      return CallbackLater(std::move(callback), result);
+      return CallbackLater(std::move(callback));
     }
   }
   busy_ = false;
-  std::move(callback).Run(result);
+  std::move(callback).Run();
 }
 
 // When |store| is true, stores the callback to be delivered later; otherwise
@@ -388,16 +394,15 @@
 // Static.
 void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
                                              MockDiskEntry* entry,
-                                             CompletionOnceCallback callback,
-                                             int result) {
+                                             base::OnceClosure callback) {
   static std::vector<CallbackInfo> callback_list;
   if (store) {
-    CallbackInfo c = {entry, std::move(callback), result};
+    CallbackInfo c = {entry, std::move(callback)};
     callback_list.push_back(std::move(c));
   } else {
     for (size_t i = 0; i < callback_list.size(); i++) {
       CallbackInfo& c = callback_list[i];
-      c.entry->CallbackLater(std::move(c.callback), c.result);
+      c.entry->CallbackLater(std::move(c.callback));
     }
     callback_list.clear();
   }
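
The CallbackLater()/StoreAndDeliverCallbacks() rework above follows one idea:
the typed result is bound into a base::OnceClosure at the point where it
becomes known, so the deferral and storage machinery never has to care whether
it is delivering an int or a RangeResult. A compressed, self-contained sketch
of that pattern; the DeferForLater/DeferCompletion names are hypothetical and
not part of this change.

#include <utility>

#include "base/bind.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/threading/thread_task_runner_handle.h"
#include "net/base/completion_once_callback.h"
#include "net/disk_cache/disk_cache.h"

// Hypothetical deferral helper: it only ever sees a closure, so one storage
// or posting path can service every callback shape.
void DeferForLater(base::OnceClosure closure) {
  base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, std::move(closure));
}

// Typed entry points curry their result into the closure up front, mirroring
// the two CallbackLater() overloads above.
void DeferCompletion(net::CompletionOnceCallback callback, int rv) {
  DeferForLater(base::BindOnce(std::move(callback), rv));
}

void DeferRangeCompletion(disk_cache::RangeResultCallback callback,
                          const disk_cache::RangeResult& result) {
  DeferForLater(base::BindOnce(std::move(callback), result));
}
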
diff --git a/net/http/mock_http_cache.h b/net/http/mock_http_cache.h
index f0736c17..7d97319 100644
--- a/net/http/mock_http_cache.h
+++ b/net/http/mock_http_cache.h
@@ -79,10 +79,9 @@
                       IOBuffer* buf,
                       int buf_len,
                       CompletionOnceCallback callback) override;
-  int GetAvailableRange(int64_t offset,
-                        int len,
-                        int64_t* start,
-                        CompletionOnceCallback callback) override;
+  RangeResult GetAvailableRange(int64_t offset,
+                                int len,
+                                RangeResultCallback callback) override;
   bool CouldBeSparse() const override;
   void CancelSparseIO() override;
   net::Error ReadyForSparseIO(
@@ -123,15 +122,15 @@
   // if the consumer called Close on the MockDiskEntry.  We achieve that by
   // leveraging the fact that this class is reference counted.
   void CallbackLater(CompletionOnceCallback callback, int result);
+  void CallbackLater(base::OnceClosure callback);
 
-  void RunCallback(CompletionOnceCallback callback, int result);
+  void RunCallback(base::OnceClosure callback);
 
   // When |store| is true, stores the callback to be delivered later; otherwise
   // delivers any callback previously stored.
   static void StoreAndDeliverCallbacks(bool store,
                                        MockDiskEntry* entry,
-                                       CompletionOnceCallback callback,
-                                       int result);
+                                       base::OnceClosure callback);
 
   static const int kNumCacheEntryDataIndices = 3;
 
diff --git a/net/http/partial_data.cc b/net/http/partial_data.cc
index 79f71ab..3f76d09 100644
--- a/net/http/partial_data.cc
+++ b/net/http/partial_data.cc
@@ -107,22 +107,18 @@
 
   if (sparse_entry_) {
     DCHECK(callback_.is_null());
-    // |start| will be deleted later in this method if GetAvailableRange()
-    // returns synchronously, or by GetAvailableRangeCompleted() if it returns
-    // asynchronously.
-    int64_t* start = new int64_t;
-    CompletionOnceCallback cb =
-        base::BindOnce(&PartialData::GetAvailableRangeCompleted,
-                       weak_factory_.GetWeakPtr(), start);
-    cached_min_len_ = entry->GetAvailableRange(current_range_start_, len, start,
-                                               std::move(cb));
+    disk_cache::RangeResultCallback cb = base::BindOnce(
+        &PartialData::GetAvailableRangeCompleted, weak_factory_.GetWeakPtr());
+    disk_cache::RangeResult range =
+        entry->GetAvailableRange(current_range_start_, len, std::move(cb));
 
+    cached_min_len_ =
+        range.net_error == OK ? range.available_len : range.net_error;
     if (cached_min_len_ == ERR_IO_PENDING) {
       callback_ = std::move(callback);
       return ERR_IO_PENDING;
     } else {
-      cached_start_ = *start;
-      delete start;
+      cached_start_ = range.start;
     }
   } else if (!truncated_) {
     if (byte_range_.HasFirstBytePosition() &&
@@ -456,17 +452,20 @@
   return static_cast<int32_t>(range_len);
 }
 
-void PartialData::GetAvailableRangeCompleted(int64_t* start, int result) {
+void PartialData::GetAvailableRangeCompleted(
+    const disk_cache::RangeResult& result) {
   DCHECK(!callback_.is_null());
-  DCHECK_NE(ERR_IO_PENDING, result);
+  DCHECK_NE(ERR_IO_PENDING, result.net_error);
 
-  cached_start_ = *start;
-  delete start;
-  cached_min_len_ = result;
-  if (result >= 0)
-    result = 1;  // Return success, go ahead and validate the entry.
+  int len_or_error =
+      result.net_error == OK ? result.available_len : result.net_error;
+  cached_start_ = result.start;
+  cached_min_len_ = len_or_error;
 
-  std::move(callback_).Run(result);
+  // ShouldValidateCache has an unusual convention where 0 denotes EOF,
+  // so convert the end of the range to success, since there may still be
+  // data to fetch from the network or from other ranges.
+  std::move(callback_).Run(len_or_error >= 0 ? 1 : len_or_error);
 }
 
 }  // namespace net
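
The folding of a RangeResult into PartialData's legacy "length on success, net
error otherwise" int convention now appears twice above, once for the
synchronous return and once in the completion callback. If a third caller ever
shows up, it could move into a helper along these lines (hypothetical, not
part of this change):

#include "net/base/net_errors.h"
#include "net/disk_cache/disk_cache.h"

// Hypothetical helper: collapse a RangeResult into the int convention that
// PartialData keeps in |cached_min_len_|.
int RangeResultToLenOrError(const disk_cache::RangeResult& result) {
  return result.net_error == net::OK ? result.available_len
                                     : result.net_error;
}
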
diff --git a/net/http/partial_data.h b/net/http/partial_data.h
index 3e8b1a40..134c32e 100644
--- a/net/http/partial_data.h
+++ b/net/http/partial_data.h
@@ -10,12 +10,10 @@
 #include "base/macros.h"
 #include "base/memory/weak_ptr.h"
 #include "net/base/completion_once_callback.h"
+#include "net/disk_cache/disk_cache.h"
 #include "net/http/http_byte_range.h"
 #include "net/http/http_request_headers.h"
 
-namespace disk_cache {
-class Entry;
-}
 
 namespace net {
 
@@ -131,8 +129,8 @@
   // Returns the length to use when scanning the cache.
   int GetNextRangeLen();
 
-  // Completion routine for our callback.  Deletes |start|.
-  void GetAvailableRangeCompleted(int64_t* start, int result);
+  // Completion routine for our callback.
+  void GetAvailableRangeCompleted(const disk_cache::RangeResult& result);
 
   // The portion we're trying to get, either from cache or network.
   int64_t current_range_start_;
@@ -143,7 +141,8 @@
   // succeeds.
   //
   // |cached_start_| represents the beginning of the range, while
-  // |cached_min_len_| the data not yet read (possibly overestimated).
+  // |cached_min_len_| the data not yet read (possibly overestimated). It may
+  // also have an error code latched into it.
   int64_t cached_start_;
   int cached_min_len_;