Load_dataset from local files: test set only

Hi, I am trying to use the ImageNet-1k dataset.
I have already downloaded only the test set from the Hugging Face Hub to my disk.
When I try to load the test set in Python, it gives me a DatasetGenerationError.
I think the problem is that it is also trying to load the train and val sets.
Can I load only the test set from local files? (My code and the full error are below; after the traceback I have added a sketch of what I am planning to try next.)

import datasets
data_path = 'C:/Users/99san/Workspace/data/imagenet-1k/data/imagenet_1k_test'
dataset = datasets.load_dataset("csv", data_files=data_path, split="test")
---------------------------------------------------------------------------
UnicodeDecodeError                        Traceback (most recent call last)
File c:\Python311\Lib\site-packages\datasets\builder.py:1925, in ArrowBasedBuilder._prepare_split_single()
-> 1925     for _, table in generator:
File c:\Python311\Lib\site-packages\datasets\packaged_modules\csv\csv.py:185, in Csv._generate_tables()
->  185     csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
File c:\Python311\Lib\site-packages\datasets\download\streaming_download_manager.py:765, in xpandas_read_csv()
->  765     return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
File c:\Python311\Lib\site-packages\pandas\io\parsers\readers.py:912, in read_csv()
File c:\Python311\Lib\site-packages\pandas\io\parsers\readers.py:1679, in TextFileReader._make_engine()
File c:\Python311\Lib\site-packages\pandas\_libs\parsers.pyx:2017, in pandas._libs.parsers.raise_parser_error()

UnicodeDecodeError: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte

The above exception was the direct cause of the following exception:

DatasetGenerationError                    Traceback (most recent call last)
c:\Users\99san\Workspace\ViT\test.ipynb Cell 3, line 4
----> 4 dataset = datasets.load_dataset("csv", data_files=data_path, split="test")
File c:\Python311\Lib\site-packages\datasets\load.py:2153, in load_dataset()
-> 2153     builder_instance.download_and_prepare(
File c:\Python311\Lib\site-packages\datasets\builder.py:954, in DatasetBuilder.download_and_prepare()
File c:\Python311\Lib\site-packages\datasets\builder.py:1049, in DatasetBuilder._download_and_prepare()
File c:\Python311\Lib\site-packages\datasets\builder.py:1813, in ArrowBasedBuilder._prepare_split()
File c:\Python311\Lib\site-packages\datasets\builder.py:1958, in ArrowBasedBuilder._prepare_split_single()
-> 1958     raise DatasetGenerationError("An error occurred while generating the dataset") from e

DatasetGenerationError: An error occurred while generating the dataset
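
Edit: looking at the traceback again, byte 0x8b at position 1 looks like the second byte of the gzip magic number (\x1f\x8b), so I suspect the file I downloaded is a gzip-compressed archive of images rather than a CSV, which would explain why the "csv" builder fails before any split logic even runs. Here is a sketch of what I am planning to try next; the extracted directory path is just my guess, since I have not verified the exact layout of the test shard on disk.

import datasets

# Guess: the downloaded test shard is a gzipped tar of images, so the "csv"
# builder can never parse it. After extracting the archive to a folder
# (the path below is hypothetical), the generic "imagefolder" builder
# should load just these images, without touching the train/val splits.
extracted_dir = 'C:/Users/99san/Workspace/data/imagenet-1k/data/imagenet_1k_test_extracted'
dataset = datasets.load_dataset("imagefolder", data_dir=extracted_dir, split="train")
# Note: "imagefolder" exposes everything under a "train" split by default,
# even though these are actually the test images.

If the shard turns out to be parquet instead of a tar.gz, I assume something like datasets.load_dataset("parquet", data_files={"test": data_path}, split="test") would also restrict loading to just the local test files.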