Use array literals where possible in MLIR source (#1930)
This change converts all the literals that can be written using `array`
instead of `dense` without requiring C++ changes. The purpose is to
shrink the diff of a future change, in which I will actually switch the
op fields from `I64DenseArrayOrElements1DAttr` to `DenseI64ArrayAttr`;
that switch will also require changes to parsing and serialization,
among other things.
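As a minimal sketch of the syntax change, using the `rhs_dilation` field that appears in the diffs below (note that `dense<1>` is a splat: with a `tensor<2xi64>` type it denotes two elements):

```mlir
// Before: 1-D integer data written as a dense elements literal.
// dense<1> is a splat and expands to [1, 1] for tensor<2xi64>.
rhs_dilation = dense<1> : tensor<2xi64>

// After: the same two elements written as a DenseI64ArrayAttr literal.
rhs_dilation = array<i64: 1, 1>
```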
mlevesquedion committed Jan 22, 2024
1 parent f1a390e commit 85e839e
Showing 20 changed files with 405 additions and 382 deletions.
32 changes: 16 additions & 16 deletions stablehlo/conversions/linalg/tests/convolution.mlir
@@ -37,8 +37,8 @@ func.func @linalg.conv_1d_nwc(%arg0: tensor<?x8x?xf32>, %arg1: tensor<2x?x?xf32>
>,
feature_group_count = 1 : i64,
padding = dense<[[0, 0]]> : tensor<1x2xi64>,
- rhs_dilation = dense<1> : tensor<1xi64>,
- window_strides = dense<1> : tensor<1xi64>,
+ rhs_dilation = array<i64: 1>,
+ window_strides = array<i64: 1>,
someattr
} : (tensor<?x8x?xf32>, tensor<2x?x?xf32>) -> tensor<?x7x?xf32>
func.return %0 : tensor<?x7x?xf32>
@@ -80,8 +80,8 @@ func.func @conv_2d_nhwc_hwcf(%arg0: tensor<?x4x5x?xf32>, %arg1: tensor<3x2x?x?xf
>,
feature_group_count = 1 : i64,
padding = dense<[[0, 0], [0, 0]]> : tensor<2x2xi64>,
- rhs_dilation = dense<1> : tensor<2xi64>,
- window_strides = dense<1> : tensor<2xi64>
+ rhs_dilation = array<i64: 1, 1>,
+ window_strides = array<i64: 1, 1>
} : (tensor<?x4x5x?xf32>, tensor<3x2x?x?xf32>) -> tensor<?x2x4x?xf32>
func.return %0 : tensor<?x2x4x?xf32>
}
@@ -223,8 +223,8 @@ func.func @conv_3d_ndhwc_dhwcf(%arg0: tensor<?x8x8x8x?xf32>, %arg1: tensor<2x2x2
>,
feature_group_count = 1 : i64,
padding = dense<[[0, 0], [0, 0], [0, 0]]> : tensor<3x2xi64>,
- rhs_dilation = dense<1> : tensor<3xi64>,
- window_strides = dense<1> : tensor<3xi64>
+ rhs_dilation = array<i64: 1, 1, 1>,
+ window_strides = array<i64: 1, 1, 1>
} : (tensor<?x8x8x8x?xf32>, tensor<2x2x2x?x?xf32>) -> tensor<?x7x7x7x?xf32>
func.return %0 : tensor<?x7x7x7x?xf32>
}
@@ -263,8 +263,8 @@ func.func @conv2d_1452x2223_dilated_valid(%arg0: tensor<1x4x5x2xf32>, %arg1: ten
>,
feature_group_count = 1 : i64,
padding = dense<0> : tensor<2x2xi64>,
- rhs_dilation = dense<[2, 1]> : tensor<2xi64>,
- window_strides = dense<1> : tensor<2xi64>
+ rhs_dilation = array<i64: 2, 1>,
+ window_strides = array<i64: 1, 1>
} : (tensor<1x4x5x2xf32>, tensor<2x2x2x3xf32>) -> tensor<1x2x4x3xf32>
func.return %0 : tensor<1x2x4x3xf32>
}
@@ -349,8 +349,8 @@ func.func @depthwise_conv(%arg0: tensor<2x4x5x2xf32>,
>,
feature_group_count = 2 : i64,
padding = dense<0> : tensor<2x2xi64>,
- rhs_dilation = dense<1> : tensor<2xi64>,
- window_strides = dense<1> : tensor<2xi64>,
+ rhs_dilation = array<i64: 1, 1>,
+ window_strides = array<i64: 1, 1>,
someattr} : (tensor<2x4x5x2xf32>, tensor<2x2x1x6xf32>) -> tensor<2x3x4x6xf32>
func.return %0 : tensor<2x3x4x6xf32>
}
@@ -390,8 +390,8 @@ func.func @depthwise_conv_with_padding(
>,
feature_group_count = 2 : i64,
padding = dense<[[0, 0], [1, 1]]> : tensor<2x2xi64>,
- rhs_dilation = dense<1> : tensor<2xi64>,
- window_strides = dense<1> : tensor<2xi64>,
+ rhs_dilation = array<i64: 1, 1>,
+ window_strides = array<i64: 1, 1>,
someattr} : (tensor<2x4x5x2xf32>, tensor<2x2x1x4xf32>) -> tensor<2x3x6x4xf32>
func.return %0 : tensor<2x3x6x4xf32>
}
@@ -438,8 +438,8 @@ func.func @depthwise_conv_multiplier_1(%arg0: tensor<1x113x113x96xf32>,
>,
feature_group_count = 96 : i64,
padding = dense<0> : tensor<2x2xi64>,
- rhs_dilation = dense<1> : tensor<2xi64>,
- window_strides = dense<2> : tensor<2xi64>} : (tensor<1x113x113x96xf32>, tensor<3x3x1x96xf32>) -> tensor<1x56x56x96xf32>
+ rhs_dilation = array<i64: 1, 1>,
+ window_strides = array<i64: 2, 2>} : (tensor<1x113x113x96xf32>, tensor<3x3x1x96xf32>) -> tensor<1x56x56x96xf32>
func.return %0 : tensor<1x56x56x96xf32>
}
// CHECK-DAG: %[[CST:.+]] = arith.constant 0.000000e+00 : f32
@@ -476,8 +476,8 @@ func.func @depthwise_conv_multiplier_1_with_padding(
>,
feature_group_count = 96 : i64,
padding = dense<[[1, 1], [2, 2]]> : tensor<2x2xi64>,
- rhs_dilation = dense<1> : tensor<2xi64>,
- window_strides = dense<2> : tensor<2xi64>} : (tensor<1x113x113x96xf32>, tensor<3x3x1x96xf32>) -> tensor<1x57x58x96xf32>
+ rhs_dilation = array<i64: 1, 1>,
+ window_strides = array<i64: 2, 2>} : (tensor<1x113x113x96xf32>, tensor<3x3x1x96xf32>) -> tensor<1x57x58x96xf32>
func.return %0 : tensor<1x57x58x96xf32>
}
// CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32
18 changes: 9 additions & 9 deletions stablehlo/conversions/linalg/tests/gather.mlir
@@ -14,7 +14,7 @@ func.func @gather(%operand : tensor<1x4x8xi32>, %start_indices : tensor<1x8x2xi3
start_index_map = [0, 1]
>,
indices_are_sorted = false,
- slice_sizes = dense<[1, 1, 8]> : tensor<3xi64>,
+ slice_sizes = array<i64: 1, 1, 8>,
someattr
} : (tensor<1x4x8xi32>, tensor<1x8x2xi32>) -> tensor<1x8x8xi32>
func.return %res : tensor<1x8x8xi32>
@@ -59,7 +59,7 @@ func.func @gather_unsigned_index(
start_index_map = [0, 1]
>,
indices_are_sorted = false,
- slice_sizes = dense<[1, 1, 8]> : tensor<3xi64>,
+ slice_sizes = array<i64: 1, 1, 8>,
someattr
} : (tensor<1x4x8xi32>, tensor<1x8x2xui32>) -> tensor<1x8x8xi32>
func.return %res : tensor<1x8x8xi32>
@@ -84,7 +84,7 @@ func.func @gather_unsigned(%operand : tensor<1x4x8xui32>, %start_indices : tenso
start_index_map = [0, 1]
>,
indices_are_sorted = false,
- slice_sizes = dense<[1, 1, 8]> : tensor<3xi64>
+ slice_sizes = array<i64: 1, 1, 8>
} : (tensor<1x4x8xui32>, tensor<1x8x2xi32>) -> tensor<1x8x8xui32>
func.return %res : tensor<1x8x8xui32>
}
@@ -107,7 +107,7 @@ func.func @gather_no_collapse(%operand : tensor<6x3xi32>, %start_indices : tenso
start_index_map = [0, 1]
>,
indices_are_sorted = false,
- slice_sizes = dense<[4, 2]> : tensor<2xi64>
+ slice_sizes = array<i64: 4, 2>
} : (tensor<6x3xi32>, tensor<5x2xi32>) -> tensor<5x4x2xi32>
func.return %res : tensor<5x4x2xi32>
}
@@ -148,7 +148,7 @@ func.func @gather_max_offset(%operand : tensor<?x?x?xi32>, %start_indices : tens
start_index_map = [0, 1]
>,
indices_are_sorted = false,
- slice_sizes = dense<[2, 3, 4]> : tensor<3xi64>
+ slice_sizes = array<i64: 2, 3, 4>
} : (tensor<?x?x?xi32>, tensor<5x2xi32>) -> tensor<2x3x4x5xi32>
func.return %res : tensor<2x3x4x5xi32>
}
@@ -198,7 +198,7 @@ func.func @gather_reorder_start_index(%operand : tensor<6x3x2x7xi32>, %start_ind
start_index_map = [3, 1, 2, 0]
>,
indices_are_sorted = false,
- slice_sizes = dense<[1, 2, 1, 4]> : tensor<4xi64>
+ slice_sizes = array<i64: 1, 2, 1, 4>
} : (tensor<6x3x2x7xi32>, tensor<5x4xi32>) -> tensor<5x2x4xi32>
func.return %res : tensor<5x2x4xi32>
}
@@ -256,7 +256,7 @@ func.func @gather_implicit_trailing_dim(%operand : tensor<?x?xi32>, %start_indic
start_index_map = [0]
>,
indices_are_sorted = false,
- slice_sizes = dense<[3, 4]> : tensor<2xi64>
+ slice_sizes = array<i64: 3, 4>
} : (tensor<?x?xi32>, tensor<5x2xi32>) -> tensor<3x4x5x2xi32>
func.return %res : tensor<3x4x5x2xi32>
}
@@ -297,7 +297,7 @@ func.func @gather_non_static(%operand : tensor<?x?xi32>, %start_indices : tensor
start_index_map = [0]
>,
indices_are_sorted = false,
- slice_sizes = dense<[3, 4]> : tensor<2xi64>
+ slice_sizes = array<i64: 3, 4>
} : (tensor<?x?xi32>, tensor<?x?xi32>) -> tensor<3x4x?xi32>
func.return %res : tensor<3x4x?xi32>
}
@@ -338,7 +338,7 @@ func.func @gather_unranked(%operand : tensor<*xi32>, %start_indices : tensor<?x?
start_index_map = [0]
>,
indices_are_sorted = false,
- slice_sizes = dense<[3, 4]> : tensor<2xi64>
+ slice_sizes = array<i64: 3, 4>
} : (tensor<*xi32>, tensor<?x?xi32>) -> tensor<?x?x?xi32>
func.return %res : tensor<?x?x?xi32>
}
10 changes: 5 additions & 5 deletions stablehlo/conversions/linalg/tests/miscellaneous.mlir
@@ -651,7 +651,7 @@ func.func @map_mixed(%arg0: tensor<?xf32>,
^bb0(%arg2: tensor<f32>, %arg3: tensor<f32>):
%1 = stablehlo.add %arg2, %arg3 : tensor<f32>
"stablehlo.return"(%1) : (tensor<f32>) -> ()
- }) {dimensions = dense<0> : tensor<1xi64>}
+ }) {dimensions = array<i64: 0>}
: (tensor<?xf32>, tensor<4xf32>) -> tensor<?xf32>
func.return %0 : tensor<?xf32>
}
@@ -671,7 +671,7 @@ func.func @map_one_arg(%arg0: tensor<?xf32>) -> tensor<?xf32> {
^bb0(%arg2: tensor<f32>):
%1 = stablehlo.add %arg2, %arg2 : tensor<f32>
"stablehlo.return"(%1) : (tensor<f32>) -> ()
- }) {dimensions = dense<0> : tensor<1xi64>}
+ }) {dimensions = array<i64: 0>}
: (tensor<?xf32>) -> tensor<?xf32>
func.return %0 : tensor<?xf32>
}
@@ -702,7 +702,7 @@ func.func @map_compare(%arg0: tensor<?xcomplex<f32>>,
{comparison_direction = #stablehlo<comparison_direction EQ>}
: (tensor<f32>, tensor<f32>) -> tensor<i1>
"stablehlo.return"(%3) : (tensor<i1>) -> ()
- }) {dimensions = dense<0> : tensor<1xi64>}
+ }) {dimensions = array<i64: 0>}
: (tensor<?xcomplex<f32>>, tensor<?xcomplex<f32>>) -> tensor<?xi1>
func.return %0 : tensor<?xi1>
}
@@ -1089,8 +1089,8 @@ func.func @select_and_scatter(%arg0 : tensor<2x8x8x1xf32>, %arg1 : tensor<2x4x4x
stablehlo.return %9 : tensor<f32>
}) {
padding = dense<0> : tensor<4x2xi64>,
- window_dimensions = dense<[1, 2, 2, 1]> : tensor<4xi64>,
- window_strides = dense<[1, 2, 2, 1]> : tensor<4xi64>
+ window_dimensions = array<i64: 1, 2, 2, 1>,
+ window_strides = array<i64: 1, 2, 2, 1>
} : (tensor<2x8x8x1xf32>, tensor<2x4x4x1xf32>, tensor<f32>) -> tensor<2x8x8x1xf32>

return %0 : tensor<2x8x8x1xf32>
