checksums.py
# coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods to retrieve and store size/checksums associated to URLs."""
import dataclasses
import hashlib
import io
from typing import Any, Dict, Iterable, Optional
from absl import logging
from etils import epath
from tensorflow_datasets.core import utils
_CHECKSUM_DIRS = [
utils.tfds_path() / 'url_checksums',
]
_CHECKSUM_SUFFIX = '.txt'


@dataclasses.dataclass(eq=True)
class UrlInfo:
  """Small wrapper around the url metadata (checksum, size).

  Attributes:
    size: Download size of the file.
    checksum: Checksum of the file.
    filename: Name of the file.
  """

  size: utils.Size
  checksum: str
  # The filename is excluded from `__eq__` for backward compatibility:
  # two `UrlInfo`s are equal even if the filename is unknown or different.
  filename: Optional[str] = dataclasses.field(compare=False)

  def asdict(self) -> Dict[str, Any]:
    """Returns the dict representation of the dataclass."""
    return dataclasses.asdict(self)
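
# Illustrative sketch of the equality semantics above (values are made up):
# because `filename` is declared with `compare=False`, two `UrlInfo` objects
# with the same size and checksum compare equal even if their filenames differ.
#
#   a = UrlInfo(size=utils.Size(10), checksum='abc', filename='x.zip')
#   b = UrlInfo(size=utils.Size(10), checksum='abc', filename=None)
#   assert a == b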


def compute_url_info(
    path: epath.PathLike,
    checksum_cls=hashlib.sha256,
) -> UrlInfo:
  """Locally computes the size and checksum of the given file."""
  path = epath.Path(path)

  checksum = checksum_cls()
  size = 0
  with path.open('rb') as f:
    while True:
      block = f.read(io.DEFAULT_BUFFER_SIZE)
      size += len(block)
      if not block:
        break
      checksum.update(block)

  return UrlInfo(
      checksum=checksum.hexdigest(),  # base64 digest would have been better.
      size=utils.Size(size),
      filename=path.name,
  )
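
# Usage sketch (the path below is hypothetical): compute the metadata of a
# downloaded file so it can be compared against a registered checksum.
#
#   info = compute_url_info('/tmp/my_download.zip')
#   print(int(info.size), info.checksum, info.filename)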


def add_checksums_dir(checksums_dir: str) -> None:
  """Registers a new checksums dir.

  This function allows external datasets not present in the tfds repository to
  define their own checksums_dir containing the dataset downloads checksums.

  Note: When redistributing your dataset, you should distribute the checksums
  files with it and call `add_checksums_dir` when the user imports your
  `my_dataset.py`.

  ```
  # Set-up the folder containing the 'my_dataset.txt' checksums.
  checksum_dir = os.path.join(os.path.dirname(__file__), 'checksums/')
  checksum_dir = os.path.normpath(checksum_dir)

  # Add the checksum dir (will be executed when the user imports your dataset)
  tfds.download.add_checksums_dir(checksum_dir)

  class MyDataset(tfds.core.DatasetBuilder):
    ...
  ```

  Args:
    checksums_dir: `str`, checksums dir to add to the registry.
  """
  logging.warning(
      '`tfds.core.add_checksums_dir` is deprecated. Refactor dataset in '
      'self-contained folders (`my_dataset/` folder containing '
      'my_dataset.py, my_dataset_test.py, dummy_data/, checksums.tsv). '
      'The checksum file will be automatically detected. More info at: '
      'https://www.tensorflow.org/datasets/add_dataset'
  )
  if checksums_dir in _CHECKSUM_DIRS:  # Avoid duplicates
    return
  _CHECKSUM_DIRS.append(checksums_dir)


@utils.memoize()
def _checksum_paths() -> Dict[str, epath.Path]:
  """Returns dict {'dataset_name': 'path/to/checksums/file'}."""
  dataset2path = {}
  for dir_path in _CHECKSUM_DIRS:
    if isinstance(dir_path, str):
      dir_path = epath.Path(dir_path)
    if not dir_path.exists():  # Skip registered directories which do not exist.
      continue
    for file_path in dir_path.iterdir():
      if not file_path.name.endswith(_CHECKSUM_SUFFIX):
        continue
      dataset_name = file_path.name[: -len(_CHECKSUM_SUFFIX)]
      dataset2path[dataset_name] = file_path
  return dataset2path


def _parse_url_infos(checksums_file: Iterable[str]) -> Dict[str, UrlInfo]:
  """Returns the {url: UrlInfo} mapping stored in the given file."""
  url_infos = {}
  for line in checksums_file:
    line = line.strip()  # Remove the trailing '\r' on Windows OS.
    if not line or line.startswith('#'):
      continue
    values = line.split('\t')
    if len(values) == 1:  # Not enough values to unpack (legacy files)
      # URL might have spaces inside, but size and checksum will not.
      values = line.rsplit(' ', 2)
    if len(values) == 4:
      url, size, checksum, filename = values
    elif len(values) == 3:
      url, size, checksum = values
      filename = None
    else:
      raise AssertionError(f'Error parsing checksums: {values}')
    url_infos[url] = UrlInfo(
        size=utils.Size(size),
        checksum=checksum,
        filename=filename,
    )
  return url_infos
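
# Sketch of the on-disk format this parser accepts (the values below are
# hypothetical): one record per line, tab-separated as
# `<url>\t<size>\t<checksum>[\t<filename>]`; `#` comment lines are skipped and
# legacy space-separated records are still handled via `rsplit(' ', 2)`.
#
#   _parse_url_infos([
#       'https://example.org/data.zip\t1024\tdeadbeef\tdata.zip',
#   ])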


@utils.memoize()
def get_all_url_infos() -> Dict[str, UrlInfo]:
  """Returns dict associating URL to UrlInfo."""
  url_infos = {}
  for path in _checksum_paths().values():
    dataset_url_infos = load_url_infos(path)
    for url, url_info in dataset_url_infos.items():
      if url_infos.get(url, url_info) != url_info:
        raise AssertionError(
            'URL {} is registered with 2+ distinct size/checksum tuples. '
            '{} vs {}'.format(url, url_info, url_infos[url])
        )
    url_infos.update(dataset_url_infos)
  return url_infos
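
# Usage sketch (the URL below is hypothetical): look up the registered
# size/checksum for a URL across every known checksums file.
#
#   info = get_all_url_infos().get('https://example.org/data.zip')
#   if info is not None:
#     print(int(info.size), info.checksum)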


def load_url_infos(path: epath.PathLike) -> Dict[str, UrlInfo]:
  """Loads the checksums."""
  return _parse_url_infos(epath.Path(path).read_text().splitlines())


def save_url_infos(
    path: epath.Path,
    url_infos: Dict[str, UrlInfo],
) -> None:
  """Stores the given checksums and sizes for a specific dataset.

  The content of the file is never discarded, only updated. This is to ensure
  that if the process is killed right after the first download finishes,
  checksums registered during previous runs aren't lost.

  It is the responsibility of the caller not to call this function multiple
  times in parallel for a given dataset.

  Only the original file content is updated. This means the entire set of new
  sizes and checksums must be given at every call.

  Args:
    path: Path to the resources.
    url_infos: dict, {url: UrlInfo(size, checksum, filename)}.
  """
  original_data = load_url_infos(path) if path.exists() else {}
  new_data = original_data.copy()
  new_data.update(url_infos)

  # Compare filenames separately, as the filename field is excluded from
  # `UrlInfo.__eq__` (compare=False).
  if original_data == new_data and _filenames_equal(original_data, new_data):
    return

  lines = [
      f'{url}\t{int(url_info.size)}\t{url_info.checksum}\t'
      f'{url_info.filename or ""}\n'
      for url, url_info in sorted(new_data.items())
  ]
  path.write_text(''.join(lines), encoding='UTF-8')
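
# Usage sketch (the path and URL below are hypothetical): merge newly computed
# checksums into an existing `my_dataset.txt` file, then read them back.
#
#   checksums_path = epath.Path('/path/to/url_checksums/my_dataset.txt')
#   save_url_infos(checksums_path, {
#       'https://example.org/data.zip': compute_url_info('/tmp/data.zip'),
#   })
#   url_infos = load_url_infos(checksums_path)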


def _filenames_equal(
    left: Dict[str, UrlInfo],
    right: Dict[str, UrlInfo],
) -> bool:
  """Compare filenames."""
  return all(
      l.filename == r.filename for _, (l, r) in utils.zip_dict(left, right)
  )