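"""Split a flat directory of .mp3 files into fixed-size batches.

Moves every audio file in main_dir into subfolders batch_001, batch_002, ...
and writes a cleaned metadata CSV whose 'name' column carries the batch prefix.

Usage:
    python batch_split.py /path/to/main_dir
"""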
import argparse
import csv
import shutil
from pathlib import Path

# ===================== CONFIGURATION ===================== #
parser = argparse.ArgumentParser(description="Batch split files and clean metadata.")
parser.add_argument("main_dir", type=str, help="Path to the main directory containing files.")
args = parser.parse_args()
MAIN_DIR = args.main_dir
CSV_PATH = f"{MAIN_DIR}/metadata.csv"  # metadata file expected in the root of main_dir
BATCH_SIZE = 9000  # maximum number of files per batch folder
# ========================================================== #
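# Pipeline: load metadata -> delete files not listed in it -> drop metadata rows
# without a file -> move files into batch folders -> prefix 'name' with the
# batch folder -> write <metadata>_clean.csv.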
# ---- Resolve paths ----
main_path = Path(MAIN_DIR)
csv_path = Path(CSV_PATH)
# ===================================================
# STEP 1 — Load metadata safely
# ===================================================
clean_rows = []
bad_rows = []
with open(csv_path, "r", encoding="utf-8", errors="replace", newline="") as f:
    reader = csv.reader(f, delimiter=",", quotechar='"')
    header = next(reader)
    n_cols = len(header)
    clean_rows.append(header)
    for row in reader:
        # Strip null bytes and replace Windows-unsafe path characters
        # (\ / : * ? " < > |) with underscores in every field.
        row = [
            col.replace("\x00", "")
               .replace("\\", "_").replace("/", "_").replace(":", "_")
               .replace("*", "_").replace("?", "_").replace("\"", "_")
               .replace("<", "_").replace(">", "_").replace("|", "_")
            for col in row
        ]
        # Keep only rows whose column count matches the header.
        if len(row) == n_cols:
            clean_rows.append(row)
        else:
            bad_rows.append(row)

print(f"Header columns: {n_cols}")
print(f"Total valid rows: {len(clean_rows)-1}")
print(f"Bad rows skipped: {len(bad_rows)}")
# Convert CSV to list of dicts for easier manipulation
data = [dict(zip(header, row)) for row in clean_rows[1:]]
# ----------------- Step 2: Remove files not in metadata ----------------- #
metadata_filenames = {row["name"] + ".mp3" for row in data}  # the 'name' column has no extension
actual_files = {f.name for f in main_path.iterdir() if f.is_file()}
# Never delete the metadata CSV itself: it is not listed in its own 'name' column.
files_to_remove = actual_files - metadata_filenames - {csv_path.name}
for fname in files_to_remove:
    print(f"Removing unlisted file: {fname}")
    (main_path / fname).unlink()
# refresh actual files (the metadata CSV stays in the root and is never batched)
actual_files = {f.name for f in main_path.iterdir() if f.is_file() and f.name != csv_path.name}
# ----------------- Step 3: Remove metadata rows where file does not exist ----------------- #
# 'name' values carry no extension, so compare against the on-disk .mp3 filenames.
data = [row for row in data if row["name"] + ".mp3" in actual_files]
# ----------------- Step 4: Split into batches ----------------- #
sorted_files = sorted(actual_files)
file_to_batch = {}
batch_num = 1
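# Sorting filenames makes the file -> batch assignment deterministic.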
for i in range(0, len(sorted_files), BATCH_SIZE):
    batch_files = sorted_files[i:i + BATCH_SIZE]
    batch_dir = main_path / f"batch_{batch_num:03d}"
    batch_dir.mkdir(exist_ok=True)
    print(f"Creating {batch_dir} with {len(batch_files)} files")
    for fname in batch_files:
        src = main_path / fname
        dst = batch_dir / fname
        shutil.move(str(src), str(dst))
        file_to_batch[fname] = batch_num
    batch_num += 1
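# Resulting layout: main_dir/batch_001/*.mp3, main_dir/batch_002/*.mp3, ...
# with at most BATCH_SIZE files per folder.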
# ----------------- Step 5: Add batch prefix to metadata ----------------- #
for row in data:
    # file_to_batch is keyed by on-disk filenames, which include the .mp3 extension.
    bn = file_to_batch.get(row["name"] + ".mp3")
    if bn is None:
        raise ValueError(f"Missing batch info for file: {row['name']}")
    row["name"] = f"batch_{bn:03d}/{row['name']}"
# ----------------- Step 6: Save cleaned CSV ----------------- #
output_csv = csv_path.with_name(csv_path.stem + "_clean.csv")
with open(output_csv, "w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=header)
    writer.writeheader()
    writer.writerows(data)
print(f"Number of lines in cleaned CSV: {len(data)}")
print(f"Cleaned CSV saved to: {output_csv}")