Supersede pylint_allowlist
bhack committed Apr 3, 2021
1 parent 38f4134 commit c58b714
Showing 86 changed files with 264 additions and 265 deletions.
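
All of the hunks below follow one mechanical pattern: instead of keeping the affected checks in a repository-wide pylint allowlist, each flagged statement now carries an inline "# pylint: disable=<message-name>" comment that silences only that message on that line. A minimal sketch of the pattern, using illustrative variable names that do not come from the TensorFlow sources (pylint tends to misread NumPy's ndarray.reshape signature, so the call below is valid but still flagged):

import numpy as np

flat = np.arange(24, dtype=np.float32)
# The reshape call is correct (2 * 3 * 4 * 1 == 24 elements), but pylint reports
# too-many-function-args for it; the trailing comment suppresses only that message
# on this line instead of hiding it globally through an allowlist entry.
cube = flat.reshape(2, 3, 4, 1)  # pylint: disable=too-many-function-args
print(cube.shape)  # (2, 3, 4, 1)

Keeping the suppression next to the code it excuses means it disappears naturally when the offending line is rewritten, which is the usual motivation for preferring inline disables over a central allowlist.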
2 changes: 1 addition & 1 deletion tensorflow/compiler/tests/binary_ops_test.py
@@ -1091,7 +1091,7 @@ def testBatchMatMul(self):
math_ops.matmul,
np.array([], dtype=dtype).reshape((0, 2, 4)),
np.array([], dtype=dtype).reshape((0, 4, 3)),
expected=np.array([], dtype=dtype).reshape(0, 2, 3))
expected=np.array([], dtype=dtype).reshape(0, 2, 3)) # pylint: disable=too-many-function-args

# Regression test for b/31472796.
if dtype != np.float16 and hasattr(np, "matmul"):
4 changes: 2 additions & 2 deletions tensorflow/compiler/tests/image_ops_test.py
@@ -91,13 +91,13 @@ def testRGBToHSVNumpy(self):
"""Tests the RGB to HSV conversion matches a reference implementation."""
for nptype in self.float_types:
rgb_flat = _generate_numpy_random_rgb((64, 3)).astype(nptype)
rgb_np = rgb_flat.reshape(4, 4, 4, 3)
rgb_np = rgb_flat.reshape(4, 4, 4, 3) # pylint: disable=too-many-function-args
hsv_np = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_np = hsv_np.reshape(4, 4, 4, 3)
hsv_np = hsv_np.reshape(4, 4, 4, 3) # pylint: disable=too-many-function-args
with self.session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
2 changes: 1 addition & 1 deletion tensorflow/lite/python/interpreter_test.py
@@ -401,7 +401,7 @@ def testBaseProtectsFunctions(self):
_ = self.interpreter.allocate_tensors()
# Make sure we get an exception if we try to run an unsafe operation
with self.assertRaisesRegex(RuntimeError, 'There is at least 1 reference'):
_ = self.interpreter.invoke()
_ = self.interpreter.invoke() # pylint: disable=assignment-from-no-return
# Now test that we can run
del in0 # this is our only buffer reference, so now it is safe to change
in0safe = self.interpreter.tensor(self.input0)
4 changes: 2 additions & 2 deletions tensorflow/python/autograph/converters/directives_test.py
@@ -60,7 +60,7 @@ def test_loop_target(self):
def f():
a = True
while True:
directives.set_loop_options(parallel_iterations=10, back_prop=a)
directives.set_loop_options(parallel_iterations=10, back_prop=a) # pylint: disable=unexpected-keyword-arg
pass

_, node, _ = self.transform(f, directives_converter, include_ast=True)
@@ -86,7 +86,7 @@ def f():
a = 1
while True:
a = 2
directives.set_loop_options(parallel_iterations=10, back_prop=a)
directives.set_loop_options(parallel_iterations=10, back_prop=a) # pylint: disable=unexpected-keyword-arg

with self.assertRaisesRegex(ValueError, 'must be the first statement'):
self.transform(f, directives_converter, include_ast=True)
2 changes: 1 addition & 1 deletion tensorflow/python/autograph/pyct/static_analysis/annos.py
@@ -26,7 +26,7 @@

class NoValue(Enum):

def __repr__(self):
def __repr__(self): # pylint: disable=invalid-repr-returned
return self.name


2 changes: 1 addition & 1 deletion tensorflow/python/client/session.py
@@ -1391,7 +1391,7 @@ def _do_call(self, fn, *args):
'\nby modifying the config for creating the session eg.'
'\nsession_config.graph_options.rewrite_options.'
'disable_meta_optimizer = True')
raise type(e)(node_def, op, message)
raise type(e)(node_def, op, message) # pylint: disable=no-value-for-parameter

def _extend_graph(self):
with self._graph._session_run_lock(): # pylint: disable=protected-access
2 changes: 1 addition & 1 deletion tensorflow/python/compiler/tensorrt/test/base_test.py
@@ -121,7 +121,7 @@ def ExpectedEnginesToBuild(self, run_params):
}

def setUp(self):
super(trt_test.TfTrtIntegrationTestBase, self).setUp()
super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call
# Disable layout optimizer, since it will convert BiasAdd with NHWC
# format to NCHW format under four dimensional input.
self.DisableNonTrtOptimizers()
(changed file, name not shown)
@@ -107,7 +107,7 @@ def GetParams(self):
[[4, 6680]])

def setUp(self):
super(trt_test.TfTrtIntegrationTestBase, self).setUp()
super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call
# Disable layout optimizer, since it will convert BiasAdd with NHWC
# format to NCHW format under four dimensional input.
self.DisableNonTrtOptimizers()
(changed file, name not shown)
@@ -65,7 +65,7 @@ def ExpectedEnginesToBuild(self, run_params):
# TODO(b/176540862): remove this routine to disallow native segment execution
# for TensorRT 7+.
def setUp(self):
super(trt_test.TfTrtIntegrationTestBase, self).setUp()
super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call
if trt_test.IsTensorRTVersionGreaterEqual(7):
os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "True"

(changed file, name not shown)
@@ -81,7 +81,7 @@ def GetParams(self):
expected_output_dims=expected_output_dims)

def setUp(self):
super(trt_test.TfTrtIntegrationTestBase, self).setUp()
super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call
# Disable layout optimizer, since it will convert BiasAdd with NHWC
# format to NCHW format under four dimensional input.
self.DisableNonTrtOptimizers()
2 changes: 1 addition & 1 deletion tensorflow/python/compiler/tensorrt/test/int32_test.py
@@ -47,7 +47,7 @@ def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.int32, [[100, 4]], [[100, 10]])

def setUp(self):
super(trt_test.TfTrtIntegrationTestBase, self).setUp()
super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call
# Disable layout optimizer, since it will convert BiasAdd with NHWC
# format to NCHW format under four dimensional input.
self.DisableNonTrtOptimizers()
(changed file, name not shown)
@@ -77,7 +77,7 @@ def run(self, input1, input2):
root = SimpleModel()

# Saved TF model
save(root, tf_saved_model_dir,
save(root, tf_saved_model_dir, # pylint: disable=not-callable
{signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: root.run})

# Convert TF model to TensorRT
(changed file, name not shown)
@@ -73,7 +73,7 @@ def ExpectedEnginesToBuild(self, run_params):

# TODO(b/159459919): remove this routine to disallow native segment execution.
def setUp(self):
super(trt_test.TfTrtIntegrationTestBase, self).setUp()
super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call
os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "True"


2 changes: 1 addition & 1 deletion tensorflow/python/compiler/tensorrt/test/vgg_block_test.py
@@ -64,7 +64,7 @@ def ExpectedEnginesToBuild(self, run_params):

# TODO(b/159459919): remove this routine to disallow native segment execution.
def setUp(self):
super(trt_test.TfTrtIntegrationTestBase, self).setUp()
super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call
os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "True"


(changed file, name not shown)
@@ -397,10 +397,10 @@ def testSerializedContainingDense(self):

expected_output = {
aname:
np.array(
np.array( # pylint: disable=too-many-function-args
[[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(
np.array( # pylint: disable=too-many-function-args
["b0_str", ""], dtype=bytes).reshape(2, 1, 1, 1, 1),
}

@@ -445,10 +445,10 @@ def testSerializedContainingDenseWithConcat(self):

expected_output = {
aname:
np.array(
np.array( # pylint: disable=too-many-function-args
[[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(
np.array( # pylint: disable=too-many-function-args
["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
}

@@ -506,11 +506,11 @@ def testSerializedContainingDenseWithDefaults(self):

expected_output = {
"a":
np.array(
np.array( # pylint: disable=too-many-function-args
[[1, 1], [3, -3], [3, -3]], dtype=np.float32).reshape(3, 1, 2,
1),
"b":
np.array(
np.array( # pylint: disable=too-many-function-args
["tmp_str", "b1", "tmp_str"], dtype=bytes).reshape(3, 1, 1, 1,
1),
}
@@ -771,7 +771,7 @@ def testSerializedContainingVarLenDense(self):

expected_output = {
aname:
np.array(
np.array( # pylint: disable=too-many-function-args
[
[0, 0, 0, 0],
[1, 1, 0, 0],
@@ -780,7 +780,7 @@ def testSerializedContainingVarLenDense(self):
],
dtype=np.float32).reshape(4, 2, 2, 1),
bname:
np.array(
np.array( # pylint: disable=too-many-function-args
[["", ""], ["b0_str", "b1_str"], ["b1", ""], ["", ""]],
dtype=bytes).reshape(4, 2, 1, 1, 1),
cname:
@@ -809,7 +809,7 @@ def testSerializedContainingVarLenDense(self):

# Test with padding values.
expected_output_custom_padding = dict(expected_output)
expected_output_custom_padding[aname] = np.array(
expected_output_custom_padding[aname] = np.array( # pylint: disable=too-many-function-args
[
[-2, -2, -2, -2],
[1, 1, -2, -2],
2 changes: 1 addition & 1 deletion tensorflow/python/data/experimental/ops/optimization.py
@@ -31,7 +31,7 @@ def model():

def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset_ops._ModelDataset(dataset) # pylint: disable=protected-access
return dataset_ops._ModelDataset(dataset) # pylint: disable=protected-access # pylint: disable=no-value-for-parameter

return _apply_fn

12 changes: 6 additions & 6 deletions tensorflow/python/distribute/combinations_test.py
@@ -39,7 +39,7 @@ class ClusterCombinationTest(test.TestCase, parameterized.TestCase):
# Note that we don't have a standalone combination for ClusterParameters, so
# we should use GPUCombination which contains it.

@framework_combinations.generate(
@framework_combinations.generate( # pylint: disable=redundant-keyword-arg
framework_combinations.combine(distribution=[
combinations.NamedDistribution(
"HasClusterParams", lambda: None, has_chief=True, num_workers=2),
@@ -49,7 +49,7 @@ def testClusterParams(self, distribution, has_chief, num_workers):
self.assertTrue(has_chief)
self.assertEqual(num_workers, 2)

@framework_combinations.generate(
@framework_combinations.generate( # pylint: disable=redundant-keyword-arg
framework_combinations.combine(distribution=[
combinations.NamedDistribution("NoClusterParams", lambda: None),
]),
@@ -58,14 +58,14 @@ def testClusterParamsHasDefault(self, distribution, has_chief, num_workers):
self.assertFalse(has_chief)
self.assertEqual(num_workers, 1)

@framework_combinations.generate(
@framework_combinations.generate( # pylint: disable=redundant-keyword-arg
framework_combinations.combine(v=1),
test_combinations=(combinations.ClusterCombination(),))
def testClusterParamsNoStrategy(self, v, has_chief, num_workers):
self.assertFalse(has_chief)
self.assertEqual(num_workers, 1)

@framework_combinations.generate(
@framework_combinations.generate( # pylint: disable=redundant-keyword-arg
framework_combinations.combine(distribution=[
combinations.NamedDistribution(
"WithClusterParams", lambda: None, has_chief=True, num_workers=2),
Expand All @@ -76,7 +76,7 @@ def testClusterParamsAreOptional(self, distribution):
# If combinations library doesn't raise an exception, the test is passed.
pass

@framework_combinations.generate(
@framework_combinations.generate( # pylint: disable=redundant-keyword-arg
framework_combinations.combine(
ds1=combinations.NamedDistribution(
"Strategy1", lambda: None, has_chief=True, num_workers=0),
@@ -121,7 +121,7 @@ def testUpdateEnvInWorker(self):
@unittest.expectedFailure
class ClusterParametersShouldFailTest(test.TestCase, parameterized.TestCase):

@framework_combinations.generate(
@framework_combinations.generate( # pylint: disable=redundant-keyword-arg
framework_combinations.combine(
ds1=combinations.NamedDistribution(
"Strategy1", lambda: None, has_chief=True, num_workers=2),
(changed file, name not shown)
@@ -785,7 +785,7 @@ def test_prefetch_to_device_dataset(self, prefetch_to_device):
experimental_fetch_to_device=prefetch_to_device)
dataset = dataset_ops.Dataset.range(100)
dataset = dataset.batch(distribution.num_replicas_in_sync)
dataset = distribution.experimental_distribute_dataset(
dataset = distribution.experimental_distribute_dataset( # pylint: disable=assignment-from-no-return
dataset, options=input_options)
if isinstance(dataset, input_lib.DistributedDatasetV1):
item = dataset.make_initializable_iterator().get_next()
@@ -807,7 +807,7 @@ def test_prefetch_to_host_dataset(self):
experimental_fetch_to_device=False)
dataset = dataset_ops.Dataset.range(100)
dataset = dataset.batch(distribution.num_replicas_in_sync)
dataset = distribution.experimental_distribute_dataset(
dataset = distribution.experimental_distribute_dataset( # pylint: disable=assignment-from-no-return
dataset, options=input_options)
if isinstance(dataset, input_lib.DistributedDatasetV1):
item = dataset.make_initializable_iterator().get_next()
4 changes: 2 additions & 2 deletions tensorflow/python/distribute/strategy_test_lib.py
@@ -751,7 +751,7 @@ def _testDeviceScope(self, distribution):

def _testMakeInputFnIteratorWithDataset(self, distribution):
dataset_fn = lambda: dataset_ops.Dataset.range(100)
num_gpus = self._get_num_gpus()
num_gpus = self._get_num_gpus() # pylint: disable=assignment-from-no-return
num_workers = 1

expected_values = [[i+j for j in range(num_gpus)] * num_workers
@@ -775,7 +775,7 @@ def fn():
dataset = dataset_ops.Dataset.range(100)
it = dataset_ops.make_one_shot_iterator(dataset)
return it.get_next
num_gpus = self._get_num_gpus()
num_gpus = self._get_num_gpus() # pylint: disable=assignment-from-no-return
num_workers = 1

expected_values = []
2 changes: 1 addition & 1 deletion tensorflow/python/eager/pywrap_tfe_test.py
@@ -297,7 +297,7 @@ def testGraphResourceVariableRaisesFallback(self):
False)

def testOpDefDefaultType(self):
im = np.random.randint(
im = np.random.randint( # pylint: disable=too-many-function-args
low=0, high=65535, size=100, dtype=np.uint16).reshape(10, 10, 1)

context.ensure_initialized()
6 changes: 3 additions & 3 deletions tensorflow/python/eager/tensor_test.py
@@ -488,9 +488,9 @@ def testUnicode(self):
self.assertEqual(constant_op.constant(u"asdf").numpy(), b"asdf")

def testFloatTensor(self):
self.assertEqual(dtypes.float64, _create_tensor(np.float64()).dtype)
self.assertEqual(dtypes.float32, _create_tensor(np.float32()).dtype)
self.assertEqual(dtypes.float16, _create_tensor(np.float16()).dtype)
self.assertEqual(dtypes.float64, _create_tensor(np.float64()).dtype) # pylint: disable=no-value-for-parameter
self.assertEqual(dtypes.float32, _create_tensor(np.float32()).dtype) # pylint: disable=no-value-for-parameter
self.assertEqual(dtypes.float16, _create_tensor(np.float16()).dtype) # pylint: disable=no-value-for-parameter
self.assertEqual(dtypes.float32, _create_tensor(0.0).dtype)

def testSliceDimOutOfRange(self):
20 changes: 10 additions & 10 deletions tensorflow/python/feature_column/feature_column_test.py
@@ -3259,14 +3259,14 @@ def test_invalid_type(self):
ValueError,
'All feature_columns must be _FeatureColumn instances.*invalid_column'):
fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}), 'invalid_column'))
(self._TestFeatureColumn({key1: parse_spec1}), 'invalid_column')) # pylint: disable=abstract-class-instantiated

def test_one_feature_column(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
actual = fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}),))
(self._TestFeatureColumn({key1: parse_spec1}),)) # pylint: disable=abstract-class-instantiated
self.assertDictEqual({key1: parse_spec1}, actual)

def test_two_feature_columns(self):
@@ -3276,8 +3276,8 @@ def test_two_feature_columns(self):
key2 = 'key2'
parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
actual = fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}),
self._TestFeatureColumn({key2: parse_spec2})))
(self._TestFeatureColumn({key1: parse_spec1}), # pylint: disable=abstract-class-instantiated
self._TestFeatureColumn({key2: parse_spec2}))) # pylint: disable=abstract-class-instantiated
self.assertDictEqual({key1: parse_spec1, key2: parse_spec2}, actual)

def test_equal_keys_different_parse_spec(self):
@@ -3289,16 +3289,16 @@ def test_equal_keys_different_parse_spec(self):
ValueError,
'feature_columns contain different parse_spec for key key1'):
fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}),
self._TestFeatureColumn({key1: parse_spec2})))
(self._TestFeatureColumn({key1: parse_spec1}), # pylint: disable=abstract-class-instantiated
self._TestFeatureColumn({key1: parse_spec2}))) # pylint: disable=abstract-class-instantiated

def test_equal_keys_equal_parse_spec(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
actual = fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}),
self._TestFeatureColumn({key1: parse_spec1})))
(self._TestFeatureColumn({key1: parse_spec1}), # pylint: disable=abstract-class-instantiated
self._TestFeatureColumn({key1: parse_spec1}))) # pylint: disable=abstract-class-instantiated
self.assertDictEqual({key1: parse_spec1}, actual)

def test_multiple_features_dict(self):
@@ -3311,8 +3311,8 @@ def test_multiple_features_dict(self):
key3 = 'key3'
parse_spec3 = parsing_ops.VarLenFeature(dtype=dtypes.int32)
actual = fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}),
self._TestFeatureColumn({key2: parse_spec2, key3: parse_spec3})))
(self._TestFeatureColumn({key1: parse_spec1}), # pylint: disable=abstract-class-instantiated
self._TestFeatureColumn({key2: parse_spec2, key3: parse_spec3}))) # pylint: disable=abstract-class-instantiated
self.assertDictEqual(
{key1: parse_spec1, key2: parse_spec2, key3: parse_spec3}, actual)

2 changes: 1 addition & 1 deletion tensorflow/python/framework/dtypes.py
@@ -478,7 +478,7 @@ def __reduce__(self):
]:
if pdt not in _NP_TO_TF:
_NP_TO_TF[pdt] = next(
_NP_TO_TF[dt] for dt in _NP_TO_TF if dt == pdt().dtype)
_NP_TO_TF[dt] for dt in _NP_TO_TF if dt == pdt().dtype) # pylint: disable=no-value-for-parameter

TF_VALUE_DTYPES = set(_NP_TO_TF.values())

(remaining changed files not shown)