import argparse
import csv
import shutil
from pathlib import Path

parser = argparse.ArgumentParser(description="Batch split files and clean metadata.")
parser.add_argument("main_dir", type=str, help="Path to the main directory containing files.")
args = parser.parse_args()

MAIN_DIR = args.main_dir
CSV_PATH = f"{MAIN_DIR}/metadata.csv"
BATCH_SIZE = 9000

main_path = Path(MAIN_DIR)
csv_path = Path(CSV_PATH)
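
# Pass 1: read the metadata CSV, keeping only rows whose column count
# matches the header; malformed rows are counted and skipped.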
clean_rows = []
bad_rows = []

with open(csv_path, "r", encoding="utf-8", errors="replace", newline="") as f:
    reader = csv.reader(f, delimiter=",", quotechar='"')
    header = next(reader)
    n_cols = len(header)
    clean_rows.append(header)
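
    # Sanitize every field: drop NUL bytes and replace characters that are
    # reserved in Windows filenames with underscores.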
    for row in reader:
        row = [col.replace("\x00", "").replace("\\", "_").replace("/", "_").replace(":", "_")
               .replace("*", "_").replace("?", "_").replace("\"", "_").replace("<", "_")
               .replace(">", "_").replace("|", "_") for col in row]

        if len(row) == n_cols:
            clean_rows.append(row)
        else:
            bad_rows.append(row)

print(f"Header columns: {n_cols}")
print(f"Total valid rows: {len(clean_rows) - 1}")
print(f"Bad rows skipped: {len(bad_rows)}")

data = [dict(zip(header, row)) for row in clean_rows[1:]]
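
# Pass 2: delete audio files that have no metadata row. The "name" column
# holds the filename without its .mp3 extension.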
metadata_filenames = {row["name"] + ".mp3" for row in data}
# Consider only .mp3 files so the metadata CSV itself is never deleted.
actual_files = {f.name for f in main_path.iterdir() if f.is_file() and f.suffix == ".mp3"}

files_to_remove = actual_files - metadata_filenames
for fname in files_to_remove:
    print(f"Removing unlisted file: {fname}")
    (main_path / fname).unlink()

# Re-list the directory after deletions, then drop metadata rows whose
# file is missing on disk.
actual_files = {f.name for f in main_path.iterdir() if f.is_file() and f.suffix == ".mp3"}
data = [row for row in data if row["name"] + ".mp3" in actual_files]
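
# Pass 3: move files into numbered batch directories of at most
# BATCH_SIZE files each, recording which batch each file lands in.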
sorted_files = sorted(actual_files)
file_to_batch = {}
batch_num = 1

for i in range(0, len(sorted_files), BATCH_SIZE):
    batch_files = sorted_files[i:i + BATCH_SIZE]
    batch_dir = main_path / f"batch_{batch_num:03d}"
    batch_dir.mkdir(exist_ok=True)
    print(f"Creating {batch_dir} with {len(batch_files)} files")

    for fname in batch_files:
        src = main_path / fname
        dst = batch_dir / fname
        shutil.move(str(src), str(dst))
        file_to_batch[fname] = batch_num

    batch_num += 1
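
# Pass 4: rewrite each metadata name to include its batch directory.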
for row in data:
    # file_to_batch is keyed by the on-disk filename, so add the extension.
    bn = file_to_batch.get(row["name"] + ".mp3")
    if bn is None:
        raise ValueError(f"Missing batch info for file: {row['name']}")
    row["name"] = f"batch_{bn:03d}/{row['name']}"

output_csv = csv_path.with_name(csv_path.stem + "_clean.csv")

with open(output_csv, "w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=header)
    writer.writeheader()
    writer.writerows(data)

print(f"Number of lines in cleaned CSV: {len(data)}")
print(f"Cleaned CSV saved to: {output_csv}")