Add schema.org sdLicense
mkuchnik committed Feb 23, 2024
1 parent 0ee0041 commit cf0437a
Showing 90 changed files with 242 additions and 46 deletions.
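
Every metadata file touched by this commit gains the same schema.org property: "sdLicense": "apache-2.0". Per schema.org, sdLicense names the license that applies to the structured data itself (here, the Croissant metadata.json), while the existing license property continues to describe the dataset the metadata points to; a few files also reorder or adjust neighboring fields. A minimal sketch of the resulting shape, with illustrative values for everything except sdLicense:

{
  "@type": "sc:Dataset",
  "name": "example_dataset",
  "description": "Illustrative entry showing sdLicense next to license.",
  "license": "cc-by-4.0",
  "sdLicense": "apache-2.0",
  "url": "https://example.com/datasets/example/about"
}
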
1 change: 1 addition & 0 deletions datasets/0.8/audio_test/metadata.json
@@ -40,6 +40,7 @@
"@type": "sc:Dataset",
"name": "audio_test",
"description": "This is the basic test case for audio files",
"sdLicense": "apache-2.0",
"url": "None",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/bigcode-the-stack/metadata.json
@@ -41,6 +41,7 @@
"description": "The Stack contains over 6TB of permissively-licensed source code files covering 358 programming languages. The dataset was created as part of the BigCode Project, an open scientific collaboration working on the responsible development of Large Language Models for Code (Code LLMs). The Stack serves as a pre-training dataset for Code LLMs, i.e., code-generating AI systems which enable the synthesis of programs from natural language descriptions as well as other from code snippets.",
"citation": "@article{Kocetkov2022TheStack, title={The Stack: 3 TB of permissively licensed source code}, author={Kocetkov, Denis and Li, Raymond and Ben Allal, Loubna and Li, Jia and Mou,Chenghao and Mu\u00f1oz Ferrandis, Carlos and Jernite, Yacine and Mitchell, Margaret and Hughes, Sean and Wolf, Thomas and Bahdanau, Dzmitry and von Werra, Leandro and de Vries, Harm}, journal={Preprint}, year={2022} }",
"license": "other",
"sdLicense": "apache-2.0",
"url": "https://huggingface.co/datasets/bigcode/the-stack",
"distribution": [
{
3 changes: 2 additions & 1 deletion datasets/0.8/coco2014-mini/metadata.json
@@ -42,8 +42,9 @@
"description": "Smaller downloadable version of COCO to be used in unit tests.",
"citation": "None",
"license": "cc-by-4.0",
"version": "1.0.0",
"sdLicense": "apache-2.0",
"url": "None",
"version": "1.0.0",
"distribution": [
{
"@type": "sc:FileObject",
1 change: 1 addition & 0 deletions datasets/0.8/coco2014/metadata.json
@@ -42,6 +42,7 @@
"description": "COCO is a large-scale object detection, segmentation, and captioning dataset. WARNING: `metadata.json` is incomplete and does not fully define the COCO2014 dataset. It lacks `recordSet` definitions that would enable automatic loading of all the annotations.",
"citation": "@article{DBLP:journals/corr/LinMBHPRDZ14,\n author = {Tsung{-}Yi Lin and\n Michael Maire and\n Serge J. Belongie and\n Lubomir D. Bourdev and\n Ross B. Girshick and\n James Hays and\n Pietro Perona and\n Deva Ramanan and\n Piotr Doll{'{a}}r and\n C. Lawrence Zitnick},\n title = {Microsoft {COCO:} Common Objects in Context},\n journal = {CoRR},\n volume = {abs/1405.0312},\n year = {2014},\n url = {http://arxiv.org/abs/1405.0312},\n archivePrefix = {arXiv},\n eprint = {1405.0312},\n timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}",
"license": "cc-by-4.0",
"sdLicense": "apache-2.0",
"url": "https://cocodataset.org/",
"distribution": [
{
3 changes: 2 additions & 1 deletion datasets/0.8/fashion-mnist/metadata.json
@@ -41,6 +41,7 @@
"description": "Fashion-MNIST is a dataset of Zalando's article images\u2014consisting of a training set of\n60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image,\nassociated with a label from 10 classes. We intend Fashion-MNIST to serve as a direct drop-in\nreplacement for the original MNIST dataset for benchmarking machine learning algorithms.\nIt shares the same image size and structure of training and testing splits.\n",
"citation": "@article{DBLP:journals/corr/abs-1708-07747,\n author = {Han Xiao and\n Kashif Rasul and\n Roland Vollgraf},\n title = {Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning\n Algorithms},\n journal = {CoRR},\n volume = {abs/1708.07747},\n year = {2017},\n url = {http://arxiv.org/abs/1708.07747},\n archivePrefix = {arXiv},\n eprint = {1708.07747},\n timestamp = {Mon, 13 Aug 2018 16:47:27 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/abs-1708-07747},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n",
"license": "mit",
"sdLicense": "apache-2.0",
"url": "https://huggingface.co/datasets/fashion_mnist",
"distribution": [
{
@@ -96,4 +97,4 @@
]
}
]
}
}
1 change: 1 addition & 0 deletions datasets/0.8/flores-200/metadata.json
@@ -45,6 +45,7 @@
"@inproceedings{twoeval, title={Two New Evaluation Datasets for Low-Resource Machine Translation: Nepali-English and Sinhala-English}, author={Guzm\\'{a}n, Francisco and Chen, Peng-Jen and Ott, Myle and Pino, Juan and Lample, Guillaume and Koehn, Philipp and Chaudhary, Vishrav and Ranzato, Marc'Aurelio}, journal={arXiv preprint arXiv:1902.01382}, year={2019}}"
],
"license": "cc-by-sa-4.0",
"sdLicense": "apache-2.0",
"url": "https://github.com/facebookresearch/flores",
"version": "0.0.1",
"distribution": [
1 change: 1 addition & 0 deletions datasets/0.8/gpt-3/metadata.json
@@ -41,6 +41,7 @@
"description": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions \u2013 something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.",
"citation": "@article{brown2020language, title={Language Models are Few-Shot Learners}, author={Tom B. Brown and Benjamin Mann and Nick Ryder and Melanie Subbiah and Jared Kaplan and Prafulla Dhariwal and Arvind Neelakantan and Pranav Shyam and Girish Sastry and Amanda Askell and Sandhini Agarwal and Ariel Herbert-Voss and Gretchen Krueger and Tom Henighan and Rewon Child and Aditya Ramesh and Daniel M. Ziegler and Jeffrey Wu and Clemens Winter and Christopher Hesse and Mark Chen and Eric Sigler and Mateusz Litwin and Scott Gray and Benjamin Chess and Jack Clark and Christopher Berner and Sam McCandlish and Alec Radford and Ilya Sutskever and Dario Amodei}, year={2020}, eprint={2005.14165}, archivePrefix={arXiv}, primaryClass={cs.CL} }",
"license": "unknown",
"sdLicense": "apache-2.0",
"url": "https://github.com/openai/gpt-3",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/huggingface-c4/metadata.json
@@ -41,6 +41,7 @@
"description": "A colossal, cleaned version of Common Crawl's web crawl corpus.\n\nBased on Common Crawl dataset: \"https://commoncrawl.org\".\n\nThis is the processed version of Google's C4 dataset by AllenAI.\n",
"citation": "\n@article{2019t5,\n author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},\n title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},\n journal = {arXiv e-prints},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.10683},\n}\n",
"license": "odc-by",
"sdLicense": "apache-2.0",
"url": "https://huggingface.co/datasets/c4",
"version": "0.0.0",
"distribution": [
1 change: 1 addition & 0 deletions datasets/0.8/huggingface-mnist/metadata.json
@@ -41,6 +41,7 @@
"description": "The MNIST dataset consists of 70,000 28x28 black-and-white images in 10 classes (one for each digits), with 7,000\nimages per class. There are 60,000 training images and 10,000 test images.\n",
"citation": "@article{lecun2010mnist,\n title={MNIST handwritten digit database},\n author={LeCun, Yann and Cortes, Corinna and Burges, CJ},\n journal={ATT Labs [Online]. Available: http://yann.lecun.com/exdb/mnist},\n volume={2},\n year={2010}\n}\n",
"license": "mit",
"sdLicense": "apache-2.0",
"url": "https://huggingface.co/datasets/mnist",
"version": "1.0.0",
"distribution": [
1 change: 1 addition & 0 deletions datasets/0.8/movielens/metadata.json
@@ -39,6 +39,7 @@
"@type": "sc:Dataset",
"name": "Movielens-25M",
"description": "MovieLens 25M movie ratings. Stable benchmark dataset. 25 million ratings and one million tag applications applied to 62,000 movies by 162,000 users. Includes tag genome data with 15 million relevance scores across 1,129 tags. Released 12/2019",
"sdLicense": "apache-2.0",
"url": "https://grouplens.org/datasets/movielens/25m/",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/pass-mini/metadata.json
@@ -41,6 +41,7 @@
"description": "Smaller downloadable version of PASS to be used in unit tests.",
"citation": "None",
"license": "None",
"sdLicense": "apache-2.0",
"url": "None",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/pass/metadata.json
@@ -41,6 +41,7 @@
"description": "PASS is a large-scale image dataset that does not include any humans and which can be used for high-quality pretraining while significantly reducing privacy concerns.",
"citation": "@Article{asano21pass, author = \"Yuki M. Asano and Christian Rupprecht and Andrew Zisserman and Andrea Vedaldi\", title = \"PASS: An ImageNet replacement for self-supervised pretraining without humans\", journal = \"NeurIPS Track on Datasets and Benchmarks\", year = \"2021\" }",
"license": "cc-by-4.0",
"sdLicense": "apache-2.0",
"url": "https://www.robots.ox.ac.uk/~vgg/data/pass/",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/recipes/compressed_archive.json
@@ -39,6 +39,7 @@
"@type": "sc:Dataset",
"name": "compressed_archive_example",
"description": "This is a fairly minimal example, showing a way to describe archive files.",
"sdLicense": "apache-2.0",
"url": "https://example.com/datasets/recipes/compressed_archive/about",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/recipes/enum.json
@@ -39,6 +39,7 @@
"@type": "sc:Dataset",
"name": "enum_example",
"description": "This is a fairly minimal example, showing a way to describe enumerations.",
"sdLicense": "apache-2.0",
"url": "https://example.com/datasets/enum/about",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/recipes/file_object_in_zip.json
@@ -39,6 +39,7 @@
"@type": "sc:Dataset",
"name": "file_object_in_zip",
"description": "Minimal example to read a FileObject contained in a zip.",
"sdLicense": "apache-2.0",
"url": "https://mlcommons.org",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/recipes/minimal.json
@@ -39,5 +39,6 @@
"@type": "sc:Dataset",
"name": "minimal_example",
"description": "This is a very minimal example, with only the required fields.",
"sdLicense": "apache-2.0",
"url": "https://example.com/dataset/minimal/about"
}
1 change: 1 addition & 0 deletions datasets/0.8/recipes/minimal_recommended.json
@@ -40,6 +40,7 @@
"name": "minimal_example_with_recommended_fields",
"description": "This is a minimal example, including the required and the recommended fields.",
"license": "https://creativecommons.org/licenses/by/4.0/",
"sdLicense": "apache-2.0",
"url": "https://example.com/dataset/recipes/minimal-recommended",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/recipes/read_binary_file_by_line.json
@@ -39,6 +39,7 @@
"@type": "sc:Dataset",
"name": "read_binary_file_by_line",
"description": "This is a recipe illustrating how to read files line by line.",
"sdLicense": "apache-2.0",
"url": "https://mlcommons.org",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/recipes/read_from_directory.json
@@ -40,6 +40,7 @@
"@type": "sc:Dataset",
"name": "read_from_directory",
"description": "Minimal example showing how to read from local directories.",
"sdLicense": "apache-2.0",
"url": "https://github.com/mlcommons/croissant",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/recipes/read_from_tar.json
@@ -39,6 +39,7 @@
"@type": "sc:Dataset",
"name": "read_from_tar",
"description": "Example dataset to read several FileSets from a tar.gz and join them.",
"sdLicense": "apache-2.0",
"url": "https://mlcommons.org",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/recipes/simple-split.json
@@ -41,6 +41,7 @@
"name": "simple-split",
"description": "An artificial example dataset defining splits from a CSV column",
"license": "https://creativecommons.org/licenses/by/4.0/",
"sdLicense": "apache-2.0",
"url": "https://mlcommons.org",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/simple-join/metadata.json
@@ -40,6 +40,7 @@
"name": "simple-join",
"description": "Example to showcase the use of join.",
"license": "https://creativecommons.org/licenses/by/4.0/",
"sdLicense": "apache-2.0",
"url": "https://mlcommons.org",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/simple-parquet/metadata.json
@@ -40,6 +40,7 @@
"name": "simple-parquet",
"description": "Example to read Parquet files.",
"license": "https://creativecommons.org/licenses/by/4.0/",
"sdLicense": "apache-2.0",
"url": "https://mlcommons.org",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/titanic/metadata.json
@@ -42,6 +42,7 @@
"description": "The original Titanic dataset, describing the status of individual passengers on the Titanic.\n\n The titanic data does not contain information from the crew, but it does contain actual ages of half of the passengers. \n\n For more information about how this dataset was constructed: \nhttps://web.archive.org/web/20200802155940/http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3info.txt\n\nOther useful information (useful for prices description for example):\nhttp://campus.lakeforest.edu/frank/FILES/MLFfiles/Bio150/Titanic/TitanicMETA.pdf\n\n Also see the following article describing shortcomings of the dataset data:\nhttps://emma-stiefel.medium.com/plugging-holes-in-kaggles-titanic-dataset-an-introduction-to-combining-datasets-with-fuzzywuzzy-60a686699da7\n",
"citation": "The principal source for data about Titanic passengers is the Encyclopedia Titanica (http://www.encyclopedia-titanica.org/). The datasets used here were begun by a variety of researchers. One of the original sources is Eaton & Haas (1994) Titanic: Triumph and Tragedy, Patrick Stephens Ltd, which includes a passenger list created by many researchers and edited by Michael A. Findlay.\n\nThomas Cason of UVa has greatly updated and improved the titanic data frame using the Encyclopedia Titanica and created the dataset here. Some duplicate passengers have been dropped, many errors corrected, many missing ages filled in, and new variables created.\n",
"license": "afl-3.0",
"sdLicense": "apache-2.0",
"url": "https://www.openml.org/d/40945",
"version": "1.0.0",
"distribution": [
1 change: 1 addition & 0 deletions datasets/0.8/wiki-text/metadata.json
@@ -42,6 +42,7 @@
"description": "The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License.\n\nCompared to the preprocessed version of Penn Treebank (PTB), WikiText-2 is over 2 times larger and WikiText-103 is over 110 times larger. The WikiText dataset also features a far larger vocabulary and retains the original case, punctuation and numbers - all of which are removed in PTB. As it is composed of full articles, the dataset is well suited for models that can take advantage of long term dependencies.",
"citation": "@article{merity2016pointer, title={Pointer sentinel mixture models}, author={Merity, Stephen and Xiong, Caiming and Bradbury, James and Socher, Richard}, journal={arXiv preprint arXiv:1609.07843}, year={2016} }",
"license": "cc-by-sa-3.0",
"sdLicense": "apache-2.0",
"url": "https://blog.salesforceairesearch.com/the-wikitext-long-term-dependency-language-modeling-dataset/",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/0.8/world-happiness/metadata.json
@@ -41,6 +41,7 @@
"description": "Happiness scored according to economic production, social support, etc.",
"citation": "None",
"license": "cc0-1.0",
"sdLicense": "apache-2.0",
"url": "https://www.kaggle.com/datasets/unsdsn/world-happiness",
"distribution": [
{
7 changes: 4 additions & 3 deletions datasets/1.0/audio_test/metadata.json
@@ -46,8 +46,9 @@
},
"@type": "sc:Dataset",
"name": "audio_test",
"description": "This is the basic test case for audio files",
"conformsTo": "http://mlcommons.org/croissant/1.0",
"description": "This is the basic test case for audio files",
"sdLicense": "apache-2.0",
"url": "None",
"distribution": [
{
@@ -69,10 +70,10 @@
"description": "These are the sounds.",
"dataType": "sc:AudioObject",
"source": {
"fileSet": "files",
"extract": {
"fileProperty": "content"
},
"fileSet": "files"
}
}
}
]
18 changes: 6 additions & 12 deletions datasets/1.0/bigcode-the-stack/metadata.json
@@ -47,25 +47,19 @@
"name": "bigcode-the-stack",
"conformsTo": "http://mlcommons.org/croissant/1.0",
"description": "The Stack contains over 6TB of permissively-licensed source code files covering 358 programming languages. The dataset was created as part of the BigCode Project, an open scientific collaboration working on the responsible development of Large Language Models for Code (Code LLMs). The Stack serves as a pre-training dataset for Code LLMs, i.e., code-generating AI systems which enable the synthesis of programs from natural language descriptions as well as other from code snippets.",
"citeAs": "@article{Kocetkov2022TheStack, title={The Stack: 3 TB of permissively licensed source code}, author={Kocetkov, Denis and Li, Raymond and Ben Allal, Loubna and Li, Jia and Mou,Chenghao and Mu\u00f1oz Ferrandis, Carlos and Jernite, Yacine and Mitchell, Margaret and Hughes, Sean and Wolf, Thomas and Bahdanau, Dzmitry and von Werra, Leandro and de Vries, Harm}, journal={Preprint}, year={2022} }",
"creator": [
{
"@type": "Person",
"name": "Harm de Vries",
"email": "harm.devries@servicenow.com"
"@type": "Organization",
"name": "Harm de Vries"
},
{
"@type": "Person",
"name": "Leandro von Werra",
"email": "leandro@huggingface.co"
"@type": "Organization",
"name": "Leandro von Werra"
}
],
"keywords": [
"crowdsourced",
"expert-generated"
],
"citeAs": "@article{Kocetkov2022TheStack, title={The Stack: 3 TB of permissively licensed source code}, author={Kocetkov, Denis and Li, Raymond and Ben Allal, Loubna and Li, Jia and Mou,Chenghao and Mu\u00f1oz Ferrandis, Carlos and Jernite, Yacine and Mitchell, Margaret and Hughes, Sean and Wolf, Thomas and Bahdanau, Dzmitry and von Werra, Leandro and de Vries, Harm}, journal={Preprint}, year={2022} }",
"license": "other",
"sameAs": "https://www.bigcode-project.org/docs/about/the-stack/",
"sdLicense": "apache-2.0",
"url": "https://huggingface.co/datasets/bigcode/the-stack",
"distribution": [
{
1 change: 1 addition & 0 deletions datasets/1.0/coco2014-mini/metadata.json
@@ -50,6 +50,7 @@
"description": "Smaller downloadable version of COCO to be used in unit tests.",
"citeAs": "None",
"license": "cc-by-4.0",
"sdLicense": "apache-2.0",
"url": "None",
"version": "1.0.0",
"distribution": [
1 change: 1 addition & 0 deletions datasets/1.0/coco2014/metadata.json
@@ -50,6 +50,7 @@
"description": "COCO is a large-scale object detection, segmentation, and captioning dataset. WARNING: `metadata.json` is incomplete and does not fully define the COCO2014 dataset. It lacks `recordSet` definitions that would enable automatic loading of all the annotations.",
"citeAs": "@article{DBLP:journals/corr/LinMBHPRDZ14,\n author = {Tsung{-}Yi Lin and\n Michael Maire and\n Serge J. Belongie and\n Lubomir D. Bourdev and\n Ross B. Girshick and\n James Hays and\n Pietro Perona and\n Deva Ramanan and\n Piotr Doll{'{a}}r and\n C. Lawrence Zitnick},\n title = {Microsoft {COCO:} Common Objects in Context},\n journal = {CoRR},\n volume = {abs/1405.0312},\n year = {2014},\n url = {http://arxiv.org/abs/1405.0312},\n archivePrefix = {arXiv},\n eprint = {1405.0312},\n timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}",
"license": "cc-by-4.0",
"sdLicense": "apache-2.0",
"url": "https://cocodataset.org/",
"distribution": [
{

0 comments on commit cf0437a