Sign compare warning fixes batch 1 #40371

Merged
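
All of the hunks below follow the same pattern: a signed loop index or comparison operand meets an unsigned (or wider) value such as a container's size() or an op's getNumResults(), which trips -Wsign-compare. The fix is either to hoist the bound into a matching variable once or to cast explicitly at the comparison site. A minimal, self-contained sketch of the warning and the hoisted-bound idiom (PrintAll and the std::vector are purely illustrative and only stand in for the MLIR containers touched by this PR):

#include <cstdio>
#include <vector>

// With -Wall (or -Wsign-compare), the commented-out loop warns about
// "comparison of integer expressions of different signedness": `i` is a
// signed int while v.size() is an unsigned size_t.
void PrintAll(const std::vector<double>& v) {
  // for (int i = 0; i < v.size(); ++i) { ... }   // warns

  // Idiom used throughout this PR: evaluate the bound once so both sides of
  // the per-iteration comparison are ints. The hunks rely on the implicit
  // narrowing in the initializer (which -Wsign-compare does not flag); the
  // explicit cast below says the same thing more loudly and is safe as long
  // as the size fits in int.
  for (int i = 0, e = static_cast<int>(v.size()); i < e; ++i) {
    std::printf("%d: %f\n", i, v[i]);
  }
}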
@@ -76,7 +76,7 @@ class ImportQuantStatsPass
   // If the index is out of range, this method returns false. Otherwise it
   // returns true if the value is a float tensor.
   bool IsQuantizableResult(Operation *op, int index) {
-    if (index < 0 || index >= op->getNumResults()) return false;
+    if (index < 0 || index >= static_cast<int>(op->getNumResults())) return false;
     Value res = op->getResult(index);
     return res.getType().isa<ShapedType>() &&
            res.getType().cast<ShapedType>().getElementType().isa<FloatType>();

@@ -158,7 +158,7 @@ void ImportQuantStatsPass::ImportAsStatsOps(OpBuilder b, Operation *op,
     InsertStatsOpAtResult(b, op->getResult(index), layer_stats, axis_stats,
                           axis);
   } else {
-    for (int i = 0; i < op->getNumResults(); ++i) {
+    for (int i = 0, e = op->getNumResults(); i < e; ++i) {
       if (IsQuantizableResult(op, i)) {
         InsertStatsOpAtResult(b, op->getResult(i), layer_stats, axis_stats,
                               axis);

@@ -48,9 +48,9 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names,
   std::vector<double> node_mins;
   if (!min_values.empty()) {
     std::vector<std::string> node_mins_str = absl::StrSplit(min_values, ',');
-    for (int i = 0; i < node_mins_str.size(); i++) {
+    for (int i = 0, e = node_mins_str.size(); i < e; i++) {
       double value;
       if (!absl::SimpleAtod(node_mins_str[i], &value)) {
         return true;
       }
       node_mins.push_back(value);

@@ -60,7 +60,7 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names,
   std::vector<double> node_maxs;
   if (!max_values.empty()) {
     std::vector<std::string> node_maxs_str = absl::StrSplit(max_values, ',');
-    for (int i = 0; i < node_maxs_str.size(); i++) {
+    for (int i = 0, e = node_maxs_str.size(); i < e; i++) {
       double value;
       if (!absl::SimpleAtod(node_maxs_str[i], &value)) {
         llvm::errs() << "Unexpected mins: " << node_maxs_str[i] << "\n";

@@ -294,7 +294,7 @@ class QuantizationDriver {
       return;
     if (current_op == op) llvm::errs() << "===>>>";
     llvm::errs() << op->getName() << " : (";
-    for (auto i = 0; i < op->getNumOperands(); ++i) {
+    for (int i = 0, e = op->getNumOperands(); i < e; ++i) {
       if (auto params = GetOperandQuantState(op, i).params)
         params.print(llvm::errs());
       else

@@ -303,7 +303,7 @@
       llvm::errs() << ",";
     }
     llvm::errs() << ") -> (";
-    for (auto i = 0; i < op->getNumResults(); ++i) {
+    for (int i = 0, e = op->getNumResults(); i < e; ++i) {
       if (auto params = GetResultQuantState(op, i).params)
         params.print(llvm::errs());
       else
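
A side note on the two hunks just above: `auto i = 0` deduces plain int (the type of the literal 0), so those loops warned exactly as a spelled-out int would; the change is really about hoisting the bound, with the explicit `int` just making the index type obvious. A tiny illustrative sketch (not TensorFlow code; SumAll is hypothetical):

#include <vector>

int SumAll(const std::vector<int>& v) {
  int sum = 0;
  // `auto i = 0` deduces int, so this mixes signedness just like `int i`:
  // for (auto i = 0; i < v.size(); ++i) sum += v[i];   // -Wsign-compare

  // Hoisting the bound keeps the per-iteration comparison between two ints.
  for (int i = 0, e = static_cast<int>(v.size()); i < e; ++i) sum += v[i];
  return sum;
}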
tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc (10 changes: 5 additions & 5 deletions)

@@ -54,7 +54,7 @@ static Type GetQuantizedType(Builder builder, Type input_type,
   } else if (min.size() == max.size()) {
     auto shape = input_type.dyn_cast<ShapedType>();
     if (!shape || shape.getRank() <= quant_dim ||
-        min.size() != shape.getDimSize(quant_dim)) {
+        static_cast<int64_t>(min.size()) != shape.getDimSize(quant_dim)) {
       return {};
     }
     // TODO(b/141508873): the quantization dim is set to the last dimension.

@@ -75,7 +75,7 @@ TypeAttr RescaleQuantizedType(Type input, Attribute factor) {
   if (auto qtype = ele_type.dyn_cast<quant::UniformQuantizedPerAxisType>()) {
     ArrayRef<double> scales = qtype.getScales();
     // Broadcasting hasn't been implemented yet.
-    if (scales.size() != factor_values.getNumElements()) return {};
+    if (static_cast<int64_t>(scales.size()) != factor_values.getNumElements()) return {};
     SmallVector<double, 4> new_scales;
     new_scales.reserve(scales.size());
     auto scales_iter = scales.begin();

@@ -269,7 +269,7 @@ Type GetUniformQuantizedPerAxisTypeForWeight(ElementsAttr attr, int quant_dim,
                                              bool narrow_range) {
   Builder builder(attr.getContext());
   auto shape = attr.getType().cast<ShapedType>().getShape();
-  if (shape.size() <= quant_dim) return {};
+  if (static_cast<int>(shape.size()) <= quant_dim) return {};
   // `symmetric` can only be used when it is `signed` and `narrow_range`.
   if (symmetric && (!is_signed || !narrow_range)) return {};

@@ -334,7 +334,7 @@ quant::QuantizedType GetUniformQuantizedTypeForBias(
     const std::vector<quant::QuantizedType>& op_types) {
   if (op_types.empty()) return {};

-  int axis_size = 1;
+  size_t axis_size = 1;
   int32_t quant_dim = -1;
   Type expressed_type;
   // Requires all the op types are valid UniformQuantizedTypes or

@@ -368,7 +368,7 @@ quant::QuantizedType GetUniformQuantizedTypeForBias(
         scales[index_scale.index()] *= index_scale.value();
       }
     } else if (auto type = op_type.dyn_cast<quant::UniformQuantizedType>()) {
-      for (int index = 0; index != axis_size; ++index) {
+      for (int index = 0, e = axis_size; index != e; ++index) {
         scales[index] *= type.getScale();
       }
     }
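
The quantization_utils.cc hunks above are slightly different: an unsigned size() is compared against MLIR shape queries (getDimSize, getNumElements) that return a signed int64_t, so the cast goes to int64_t rather than int. A standalone sketch of that direction, with dim_size standing in for shape.getDimSize(quant_dim) (an illustrative assumption, not the actual TensorFlow helper):

#include <cstdint>
#include <vector>

// Returns true when exactly one `min` value was provided per element of the
// quantized dimension. `dim_size` plays the role of ShapedType::getDimSize(),
// which is int64_t in MLIR and can be negative for dynamic dimensions, so
// keeping the comparison signed and 64-bit is both warning-free and safer
// than letting the int64_t convert to an unsigned size_t.
bool MatchesQuantDim(const std::vector<double>& min, int64_t dim_size) {
  return static_cast<int64_t>(min.size()) == dim_size;
}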

@@ -41,7 +41,7 @@ std::string MakeUniqueFilename(string name) {
   static NameCounts& instance = *new NameCounts;

   // Remove illegal characters from `name`.
-  for (int i = 0; i < name.size(); ++i) {
+  for (int i = 0, e = name.size(); i < e; ++i) {
     char ch = name[i];
     if (ch == '/' || ch == '[' || ch == ']' || ch == '*' || ch == '?' ||
         ch == '\\') {
tensorflow/compiler/mlir/xla/ir/chlo_ops.cc (2 changes: 1 addition & 1 deletion)

@@ -49,7 +49,7 @@ static Type GetBroadcastType(Type x, Type y, Type element_type,

   if (shape_x.size() == shape_y.size()) {
     llvm::SmallVector<int64_t, 4> out_shape(shape_x.size());
-    for (int i = 0; i < shape_x.size(); i++) {
+    for (int i = 0, e = shape_x.size(); i < e; i++) {
       auto x_val = shape_x[i];
       auto y_val = shape_y[i];
       if (x_val == -1 || y_val == -1) {
tensorflow/compiler/mlir/xla/ir/hlo_ops.cc (6 changes: 3 additions & 3 deletions)

@@ -143,7 +143,7 @@ DenseIntElementsAttr BuildConvPaddingAttrs(

   int rank = padding_low.size();
   SmallVector<int64_t, 8> padding;
-  for (unsigned i = 0; i < rank; ++i) {
+  for (unsigned i = 0, e = rank; i < e; ++i) {
     padding.push_back(GetPaddingValue(padding_attr, {i, 0}) + padding_low[i]);
     padding.push_back(GetPaddingValue(padding_attr, {i, 1}) + padding_high[i]);
   }

@@ -853,7 +853,7 @@ static Attribute foldConcatenateHelper(ConcatenateOp* op,
   auto shape = type.getShape();

   size_t top_size = 1;
-  for (int i = 0; i < axis; i++) {
+  for (int i = 0, e = axis; i < e; i++) {
     top_size = top_size * shape[i];
   }

@@ -1118,7 +1118,7 @@ static LogicalResult Verify(MapOp op) {
   // increasing.
   auto values = op.dimensions().getValues<int64_t>();
   auto dimensions = std::vector<int64_t>{values.begin(), values.end()};
-  for (int i = 0; i < dimensions.size(); ++i) {
+  for (int i = 0, e = dimensions.size(); i < e; ++i) {
     if (dimensions[i] != i)
       return op.emitOpError() << "requires monotonically increasing dimension "
                                  "numbers, but got: "