diff --git a/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc b/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc
index d775122fe9c1fc..7728723940a481 100644
--- a/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc
+++ b/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc
@@ -1100,18 +1100,8 @@ TfLiteStatus BenchmarkTfLiteModel::LoadModel() {
 }
 
 std::unique_ptr<tflite::OpResolver> BenchmarkTfLiteModel::GetOpResolver() const {
-  tflite::ops::builtin::BuiltinOpResolver* resolver = nullptr;
-  // When --use_xnnpack is explicitly set to false, skip applying the default
-  // XNNPACK delegate in TfLite runtime so that the original execution path
-  // based on the unmodified model graph is still exercised.
-  if (params_.HasParam("use_xnnpack") &&
-      params_.HasValueSet<bool>("use_xnnpack") &&
-      !params_.Get<bool>("use_xnnpack")) {
-    resolver =
-        new tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates();
-  } else {
-    resolver = new tflite::ops::builtin::BuiltinOpResolver();
-  }
+  tflite::ops::builtin::BuiltinOpResolver* resolver =
+      new tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates();
   RegisterSelectedOps(resolver);
   return std::unique_ptr<tflite::OpResolver>(resolver);
 }