Disable allow-unregistered-dialect in tf-opt-main
This flag usually hides misconfiguration, so it is safer to opt in explicitly on demand.

PiperOrigin-RevId: 306240853
Change-Id: I0aae27195e51ef393e6906a97e4b096a6e4b67e9
joker-eph authored and tensorflower-gardener committed Apr 13, 2020
1 parent b9a62a7 commit 36167cb
Showing 17 changed files with 60 additions and 60 deletions.
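
With the default flipped, tf-opt now refuses to parse an op from an unregistered dialect unless the flag is passed explicitly. A minimal sketch of the opt-in, in the same RUN-line style as the tests below (the file and op names here are hypothetical, for illustration only):

// RUN: tf-opt -allow-unregistered-dialect %s
// Without -allow-unregistered-dialect, tf-opt now rejects this module,
// because "my_dialect.custom_op" does not belong to any registered dialect.
func @example() {
  "my_dialect.custom_op"() : () -> ()
  return
}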
4 changes: 2 additions & 2 deletions tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir
@@ -52,14 +52,14 @@ func @squeezeAndReshape(%arg0: tensor<1x1x10xf32>, %arg1: tensor<?x10xf32>) -> i
%1 = "tf.Squeeze"(%arg1) : (tensor<?x10xf32>) -> tensor<*xf32>
%2 = "tf.Const"() { value = dense<[2, 5]> : tensor<2xi32> } : () -> tensor<2xi32>
%3 = "tf.Reshape" (%0, %2) : (tensor<1x10xf32>, tensor<2xi32>) -> tensor<2x5xf32>
-  %4 = "some_op"(%1, %3) : (tensor<*xf32>, tensor<2x5xf32>) -> i32
+  %4 = "tf.some_op"(%1, %3) : (tensor<*xf32>, tensor<2x5xf32>) -> i32
return %4 : i32
// CHECK-LABEL: squeezeAndReshape
// CHECK: "tfl.squeeze"(%arg0) {squeeze_dims = [0]} : (tensor<1x1x10xf32>) -> tensor<1x10xf32>
// CHECK: %1 = "tfl.squeeze"(%arg1) {squeeze_dims = []} : (tensor<?x10xf32>) -> tensor<*xf32>
// CHECK: %cst = constant dense<[2, 5]> : tensor<2xi32>
// CHECK: %2 = "tfl.reshape"(%0, %cst) : (tensor<1x10xf32>, tensor<2xi32>) -> tensor<2x5xf32>
-  // CHECK: %3 = "some_op"(%1, %2) : (tensor<*xf32>, tensor<2x5xf32>) -> i32
+  // CHECK: %3 = "tf.some_op"(%1, %2) : (tensor<*xf32>, tensor<2x5xf32>) -> i32
// CHECK: return
}

@@ -1,4 +1,4 @@
-// RUN: tf-opt -tfl-load-recipe %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -allow-unregistered-dialect -tfl-load-recipe %s | FileCheck %s --dump-input-on-failure

// CHECK-LABEL: testLstm
func @testLstm(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>, %arg3: tensor<*xf32>, %arg4: tensor<*xf32>, %arg5: tensor<*xf32>, %arg6: tensor<*xf32>, %arg7: tensor<*xf32>, %arg8: tensor<*xf32>, %arg9: tensor<*xf32>, %arg10: tensor<*xf32>, %arg11: tensor<*xf32>, %arg12: tensor<*xf32>, %arg13: tensor<*xf32>, %arg14: tensor<*xf32>, %arg15: tensor<*xf32>, %arg16: tensor<*xf32>, %arg17: tensor<*xf32>, %arg18: tensor<*xf32>, %arg19: tensor<*xf32>, %arg20: tensor<*xf32>, %arg21: tensor<*xf32>, %arg22: tensor<*xf32>, %arg23: tensor<*xf32>) -> tensor<*xf32> {
@@ -161,7 +161,7 @@ func @_functionalize_if_else_branch_01(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>
}

func @_functionalize_if_then_branch_01(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> {
-  %0 = "my_unknown_op.blah"() : () -> tensor<i1>
+  %0 = "tf.blah"() : () -> tensor<i1>
return %0 : tensor<i1>
}

@@ -199,7 +199,7 @@ func @_functionalize_if_else_branch_02(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>
}

func @_functionalize_if_then_branch_02(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> {
-  %0 = "my_unknown_op.blah"() : () -> tensor<i1>
+  %0 = "tf.blah"() : () -> tensor<i1>
return %0 : tensor<i1>
}

@@ -29,29 +29,29 @@ func @control_input(%arg0 : tensor<i1>) -> tensor<i32> {
}

func @while_body_with_cluster_attr(%arg0: tensor<i32>) -> tensor<i32> {
-  %0 = "some.op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i32>
+  %0 = "tf.some_op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
func @while_cond_with_cluster_attr(%arg0: tensor<i32>) -> tensor<i1> {
-  %0 = "some.op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i1>
+  %0 = "tf.some_op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i1>
return %0 : tensor<i1>
}

func @while_body_with_wrong_cluster_attr(%arg0: tensor<i32>) -> tensor<i32> {
-  %0 = "some.op"(%arg0) {_tpu_replicate = "wrong_cluster"} : (tensor<i32>) -> tensor<i32>
+  %0 = "tf.some_op"(%arg0) {_tpu_replicate = "wrong_cluster"} : (tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
func @while_cond_with_wrong_cluster_attr(%arg0: tensor<i32>) -> tensor<i1> {
-  %0 = "some.op"(%arg0) {_tpu_replicate = "wrong_cluster"} : (tensor<i32>) -> tensor<i1>
+  %0 = "tf.some_op"(%arg0) {_tpu_replicate = "wrong_cluster"} : (tensor<i32>) -> tensor<i1>
return %0 : tensor<i1>
}

func @while_body_without_cluster_attr(%arg0: tensor<i32>) -> tensor<i32> {
-  %0 = "some.op"(%arg0) : (tensor<i32>) -> tensor<i32>
+  %0 = "tf.some_op"(%arg0) : (tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
func @while_cond_without_cluster_attr(%arg0: tensor<i32>) -> tensor<i1> {
-  %0 = "some.op"(%arg0) : (tensor<i32>) -> tensor<i1>
+  %0 = "tf.some_op"(%arg0) : (tensor<i32>) -> tensor<i1>
return %0 : tensor<i1>
}

@@ -21,23 +21,23 @@ module {
return %0, %1, %2, %3 : tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>
}
func @while_body_with_cluster_attr(%arg0: tensor<i32>) -> tensor<i32> {
-  %0 = "some.op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i32>
+  %0 = "tf.some_op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
func @while_cond_with_cluster_attr(%arg0: tensor<i32>) -> tensor<i1> {
-  %0 = "some.op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i1>
+  %0 = "tf.some_op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i1>
return %0 : tensor<i1>
}
func @while_body_without_cluster_attr(%arg0: tensor<i32>) -> tensor<i32> {
-  %0 = "some.op"(%arg0) : (tensor<i32>) -> tensor<i32>
+  %0 = "tf.some_op"(%arg0) : (tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
func @while_cond_without_cluster_attr(%arg0: tensor<i32>) -> tensor<i1> {
%0 = "tf.PartionedCalledOp"(%arg0) {f = @callee_func} : (tensor<i32>) -> tensor<i1>
return %0 : tensor<i1>
}
func @callee_func(%arg0: tensor<i32>) -> tensor<i1> {
-  %0 = "some.op"(%arg0) : (tensor<i32>) -> tensor<i1>
+  %0 = "tf.some_op"(%arg0) : (tensor<i32>) -> tensor<i1>
return %0 : tensor<i1>
}
}
@@ -26,23 +26,23 @@ module {
return %0#0 : tensor<i32>
}
func @while_body_with_cluster_attr(%arg0: tensor<i32>) -> tensor<i32> {
-  %0 = "some.op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i32>
+  %0 = "tf.some_op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
func @while_cond_with_cluster_attr(%arg0: tensor<i32>) -> tensor<i1> {
-  %0 = "some.op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i1>
+  %0 = "tf.some_op"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> tensor<i1>
return %0 : tensor<i1>
}
func @while_body_without_cluster_attr(%arg0: tensor<i32>) -> tensor<i32> {
-  %0 = "some.op"(%arg0) : (tensor<i32>) -> tensor<i32>
+  %0 = "tf.some_op"(%arg0) : (tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
func @while_cond_without_cluster_attr(%arg0: tensor<i32>) -> tensor<i1> {
%0 = "tf.PartionedCalledOp"(%arg0) { f = @callee_func} : (tensor<i32>) -> tensor<i1>
return %0 : tensor<i1>
}
func @callee_func(%arg0: tensor<i32>) -> tensor<i1> {
-  %0 = "some.op"(%arg0) : (tensor<i32>) -> tensor<i1>
+  %0 = "tf.some_op"(%arg0) : (tensor<i32>) -> tensor<i1>
return %0 : tensor<i1>
}
}
@@ -5,13 +5,13 @@
module attributes {tf_saved_model.semantics} {
// SAVEDMODEL: func @func_exported_1() attributes {tf_saved_model.exported_names = ["func_exported_1"]}
func @func_exported_1() attributes {tf_saved_model.exported_names = ["func_exported_1"]} {
-  "some_dialect.call"() {callee = {callee = {callee = @child}}} : () -> ()
+  "tf.some_call"() {callee = {callee = {callee = @child}}} : () -> ()
return
}

// SAVEDMODEL: func @func_exported_2() attributes {tf_saved_model.exported_names = ["func_exported_2"]}
func @func_exported_2() attributes {tf_saved_model.exported_names = ["func_exported_2"]} {
-  "some_dialect.call"() {callee = {callee = {callee = @child}}} : () -> ()
+  "tf.some_call"() {callee = {callee = {callee = @child}}} : () -> ()
return
}

8 changes: 4 additions & 4 deletions tensorflow/compiler/mlir/tensorflow/tests/graph_pruning.mlir
@@ -39,12 +39,12 @@ func @dead_island(%arg0 : i32) -> i32 {
// CHECK-NOT: tf_executor.island
%0 = tf_executor.graph {
%1:2 = tf_executor.island {
-    %a = "op.A"(%arg0) : (i32) -> i32
-    %b = "op.B"(%a) : (i32) -> i32
+    %a = "tf.opA"(%arg0) : (i32) -> i32
+    %b = "tf.opB"(%a) : (i32) -> i32
tf_executor.yield %b : i32
}
%2:2 = tf_executor.island {
-    %a = "op.A"(%1#0) : (i32) -> i32
+    %a = "tf.opA"(%1#0) : (i32) -> i32
tf_executor.yield %a : i32
}
tf_executor.fetch %1#0 : i32
@@ -158,7 +158,7 @@ func @control_fetch(%arg0 : i32) {
tf_executor.yield %arg0 : i32
}
%2 = tf_executor.island(%0) {
-    %a = "op.A"(%1#0) : (i32) -> i32
+    %a = "tf.opA"(%1#0) : (i32) -> i32
tf_executor.yield
}
tf_executor.fetch %2 : !tf_executor.control
@@ -70,8 +70,8 @@ func @non_tf_dialect_op_launch() {
// expected-error@+1 {{'tf_device.launch' op must contain only 'tf' dialect ops}}
%launch:2 = "tf_device.launch"() ( {
%b = "tf.opB"(%a) : (tensor<i1>) -> tensor<i32>
-    %c = "unknown.opC"(%b) : (tensor<i32>) -> tensor<f32>
-    tf_device.return %c, %b : tensor<f32>, tensor<i32>
+    %c = addi %b, %b : tensor<i32>
+    tf_device.return %c, %b : tensor<i32>, tensor<i32>
}) {device = "CPU:0"} : () -> (tensor<f32>, tensor<i32>)
%d = "tf.opD"() : () -> tensor<i1>
tf_executor.yield %a, %launch#0, %launch#1, %d : tensor<i1>, tensor<f32>, tensor<i32>, tensor<i1>
@@ -1,4 +1,4 @@
-// RUN: tf-opt -tf-materialize-passthrough-op %s | FileCheck %s --dump-input=fail
+// RUN: tf-opt -allow-unregistered-dialect -tf-materialize-passthrough-op %s | FileCheck %s --dump-input=fail


// Check that the MlirPassthroughOp is eliminated and replaced by its attached
@@ -27,11 +27,11 @@ module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, pr
// CHECK: %[[MUL:.*]] = "tf.Mul"{{.*}} (tensor<1xf32>, tensor<10xf32>) -> tensor<10xf32>
// CHECK: %[[ADD:.*]] = "tf.Add"(%[[MUL]], %[[MUL]]) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>
// CHECK: %[[CAST:.*]] = "tf.Cast"(%[[ADD]]) {{.*}} : (tensor<10xf32>) -> tensor<*xf32>
-  // CHECK: %[[UNKNOWN:.*]] = "unknown.A"(%[[CAST]]) : (tensor<*xf32>) -> tensor<*xf32>
+  // CHECK: %[[UNKNOWN:.*]] = addf %[[CAST]], %[[CAST]] : tensor<*xf32>
// CHECK: return %[[UNKNOWN]] : tensor<*xf32>
%0 = "tf.Mul"(%arg0, %arg1) : (tensor<1xf32>, tensor<10xf32>) -> tensor<*xf32>
%1 = "tf.Add"(%0, %0) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-  %2 = "unknown.A"(%1) : (tensor<*xf32>) -> tensor<*xf32>
+  %2 = addf %1, %1 : tensor<*xf32>
return %2 : tensor<*xf32>
}

16 changes: 8 additions & 8 deletions tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir
@@ -14,14 +14,14 @@
// CHECK-LABEL: func @opaquetensorattr
func @opaquetensorattr() -> () {
^bb0:
-  // CHECK: "opaqueIntTensor"() {bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4xi32>} : () -> ()
-  "opaqueIntTensor"(){bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4xi32>} : () -> ()
-  // CHECK: "opaqueFloatTensor"() {bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4xf32>} : () -> ()
-  "opaqueFloatTensor"(){bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4xf32>} : () -> ()
-  // CHECK: "opaqueStringTensor"() {bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4x!tf.string>} : () -> ()
-  "opaqueStringTensor"(){bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4x!tf.string>} : () -> ()
-  // CHECK: "opaqueResourceTensor"() {bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4x!tf.resource>} : () -> ()
-  "opaqueResourceTensor"(){bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4x!tf.resource>} : () -> ()
+  // CHECK: "tf.opaqueIntTensor"() {bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4xi32>} : () -> ()
+  "tf.opaqueIntTensor"(){bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4xi32>} : () -> ()
+  // CHECK: "tf.opaqueFloatTensor"() {bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4xf32>} : () -> ()
+  "tf.opaqueFloatTensor"(){bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4xf32>} : () -> ()
+  // CHECK: "tf.opaqueStringTensor"() {bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4x!tf.string>} : () -> ()
+  "tf.opaqueStringTensor"(){bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4x!tf.string>} : () -> ()
+  // CHECK: "tf.opaqueResourceTensor"() {bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4x!tf.resource>} : () -> ()
+  "tf.opaqueResourceTensor"(){bar = opaque<"tf", "0x68656C6C6F"> : tensor<2x1x4x!tf.resource>} : () -> ()
return
}

@@ -71,7 +71,7 @@ func @graph_with_invalid_terminator(%arg0: tensor<*xf32>) -> tensor<*xf32> {

// Check that a tf_executor.fetch parent is a graph.
func @parent_is_graph() {
-  "some.op"() ({
+  "tf.some_op"() ({
tf_executor.fetch
// expected-error@-1 {{'tf_executor.fetch' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -174,7 +174,7 @@ func @invalid_fetch(%arg0: tensor<*xf32>, %ctl: !tf_executor.control) -> tensor<

// Check that a tf_executor.island parent is a graph.
func @parent_is_graph() {
-  "some.op"() ({
+  "tf.some_op"() ({
%ctl = tf_executor.island {}
// expected-error@-1 {{'tf_executor.island' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -248,7 +248,7 @@ func @invalid_island(%arg0: tensor<*xf32>, %ctl: !tf_executor.control) {

// Check that a tf_executor.yield parent is a tf_executor.island.
func @parent_is_island() {
-  "some.op"() ({
+  "tf.some_op"() ({
tf_executor.yield
// expected-error@-1 {{'tf_executor.yield' op expects parent op 'tf_executor.island'}}
}) : () -> ()
@@ -324,7 +324,7 @@ func @invalid_yield(%arg0: tensor<*xf32>, %ctl: !tf_executor.control) {

// Check that a tf_executor.Switch parent is a graph.
func @parent_is_graph(%arg0: tensor<*xf32>, %arg1: tensor<i1>) {
-  "some.op"() ({
+  "tf.some_op"() ({
%true, %false, %ctlSwitch = tf_executor.Switch %arg0, %arg1 : tensor<*xf32>
// expected-error@-1 {{'tf_executor.Switch' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -382,7 +382,7 @@ func @invalid_switch(%arg0: tensor<*xf32>, %arg1: tensor<i1>) -> tensor<*xf32> {

// Check that a tf_executor.SwitchN parent is a graph.
func @parent_is_graph(%arg0: tensor<*xf32>, %arg1: tensor<i32>) {
-  "some.op"() ({
+  "tf.some_op"() ({
%1:6 = tf_executor.SwitchN %arg0, %arg1 of 5 : tensor<*xf32>
// expected-error@-1 {{'tf_executor.SwitchN' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -472,7 +472,7 @@ func @invalid_switchN(%arg0: tensor<i32>, %arg1: tensor<*xf32>) -> tensor<*xf32>

// Check that a tf_executor.Merge parent is a graph.
func @parent_is_graph(%arg0: tensor<*xf32>) {
-  "some.op"() ({
+  "tf.some_op"() ({
%value, %idx, %ctlMerge = tf_executor.Merge %arg0, %arg0 : tensor<*xf32>
// expected-error@-1 {{'tf_executor.Merge' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -612,7 +612,7 @@ func @invalid_merge(%arg0: tensor<*xf32>, %arg1: tensor<i1>) -> tensor<*xf32> {

// Check that a tf_executor.Enter parent is a graph.
func @parent_is_graph(%arg0: tensor<*xf32>) {
-  "some.op"() ({
+  "tf.some_op"() ({
%res:2 = tf_executor.Enter %arg0 frame "some/fra\"me" : tensor<*xf32>
// expected-error@-1 {{'tf_executor.Enter' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -635,7 +635,7 @@ func @invalid_enter(%arg0: tensor<*xf32>, %arg1: i1) -> tensor<*xf32> {

// Check that a tf_executor.NextIteration.Sink parent is a graph.
func @parent_is_graph(%arg0: tensor<*xf32>, %arg1: !tf_executor.token) {
-  "some.op"() ({
+  "tf.some_op"() ({
tf_executor.NextIteration.Sink[%arg1] %arg0 : tensor<*xf32>
// expected-error@-1 {{'tf_executor.NextIteration.Sink' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -646,7 +646,7 @@ func @parent_is_graph(%arg0: tensor<*xf32>, %arg1: !tf_executor.token) {

// Check that a tf_executor.NextIteration.Source parent is a graph.
func @parent_is_graph() {
-  "some.op"() ({
+  "tf.some_op"() ({
%1:3 = tf_executor.NextIteration.Source : tensor<*xf32>
// expected-error@-1 {{'tf_executor.NextIteration.Source' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -720,7 +720,7 @@ func @invalid_nextiteration(%arg0: tensor<*xf32>, %arg1: i1) -> tensor<*xf32> {

// Check that a tf_executor.Exit parent is a graph.
func @parent_is_graph(%arg0: tensor<*xf32>) {
-  "some.op"() ({
+  "tf.some_op"() ({
%1:2 = tf_executor.Exit %arg0 : tensor<*xf32>
// expected-error@-1 {{'tf_executor.Exit' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -742,7 +742,7 @@ func @exit(%arg0: tensor<*xi32>) -> tensor<*xf32> {

// Check that a tf_executor.ControlTrigger parent is a graph.
func @parent_is_graph(%arg0: !tf_executor.control, %arg1: !tf_executor.control) {
-  "some.op"() ({
+  "tf.some_op"() ({
%0 = tf_executor.ControlTrigger %arg0, %arg1
// expected-error@-1 {{'tf_executor.ControlTrigger' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -753,7 +753,7 @@ func @parent_is_graph(%arg0: tensor<i1>, %arg1: !tf_executor.control) {

// Check that a tf_executor.LoopCond parent is a graph.
func @parent_is_graph(%arg0: tensor<i1>, %arg1: !tf_executor.control) {
-  "some.op"() ({
+  "tf.some_op"() ({
%1:2 = tf_executor.LoopCond %arg0, %arg1 : tensor<i1>
// expected-error@-1 {{'tf_executor.LoopCond' op expects parent op 'tf_executor.graph'}}
}) : () -> ()
@@ -19,7 +19,7 @@ module attributes {tf_saved_model.semantics} {

// CHECK: func @root
func @root() attributes {tf_saved_model.exported_names = ["root"]} {
-  "some_dialect.call"() { callee = @child } : () -> ()
+  "tf.some_call"() { callee = @child } : () -> ()
return
}

@@ -36,10 +36,10 @@ module attributes {tf_saved_model.semantics} {

// Test case: Don't crash if attribute that doesn't reference a func.

-"some_dialect.global_variable"() { sym_name = "some_global" } : () -> ()
+"tf.some_opaque_global_variable"() { sym_name = "some_global" } : () -> ()

func @root2() attributes {tf_saved_model.exported_names = ["root2"]} {
-  "some_dialect.do_something_with_a_global"() { global = @some_global } : () -> ()
+  "tf.do_something_with_a_global"() { global = @some_global } : () -> ()
return
}

@@ -53,12 +53,12 @@ module attributes {tf_saved_model.semantics} {

// CHECK-NOT: func @recursively_dead0
func @recursively_dead0() {
-  "some_dialect.call"() { callee = @recursively_dead1 } : () -> ()
+  "tf.some_call"() { callee = @recursively_dead1 } : () -> ()
return
}
// CHECK-NOT: func @recursively_dead1
func @recursively_dead1() {
-  "some_dialect.call"() { callee = @recursively_dead0 } : () -> ()
+  "tf.some_call"() { callee = @recursively_dead0 } : () -> ()
return
}

@@ -73,7 +73,7 @@ module attributes {tf_saved_model.semantics} {

// CHECK: func @root
func @root() attributes {tf_saved_model.exported_names = ["root"]} {
-  "some_dialect.call"() {callee = {callee = {callee = @child}}} : () -> ()
+  "tf.some_call"() {callee = {callee = {callee = @child}}} : () -> ()
return
}

@@ -31,7 +31,7 @@ module attributes {tf_saved_model.semantics} {
tensor<f32> {tf_saved_model.index_path = [0, "bar"]}
) attributes { tf_saved_model.exported_names = ["some_func"] }
{
-  "some_dialect.some_call"() {callee = @f} : () -> ()
+  "tf.some_call"() {callee = @f} : () -> ()
return %arg0 : tensor<f32>
}

2 changes: 1 addition & 1 deletion tensorflow/compiler/mlir/tf_mlir_opt_main.cc
@@ -60,7 +60,7 @@ static llvm::cl::opt<bool> verify_passes(
static llvm::cl::opt<bool> allowUnregisteredDialects(
"allow-unregistered-dialect",
llvm::cl::desc("Allow operation with no registered dialects"),
-    llvm::cl::init(true));
+    llvm::cl::init(false));

int main(int argc, char **argv) {
tensorflow::InitMlir y(&argc, &argv);
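
Note that most of the test updates above avoid the flag rather than adopt it: the TensorFlow dialect accepts unknown operations within its own namespace, so renaming a placeholder such as "some.op" to "tf.some_op" keeps a test parseable under the new default, and only the two tests that genuinely exercise foreign dialects add -allow-unregistered-dialect to their RUN lines. A sketch of the two options (op and function names hypothetical):

// Option 1: move the placeholder into the tf dialect; no flag required,
// since unknown "tf.*" ops are still accepted by the tf dialect itself.
func @renamed_placeholder() {
  "tf.some_placeholder"() : () -> ()
  return
}

// Option 2: keep the foreign op and opt in explicitly on the RUN line:
// RUN: tf-opt -allow-unregistered-dialect %s | FileCheck %s
func @foreign_op() {
  "my_dialect.op"() : () -> ()  // parses only with the flag
  return
}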