Add files using upload-large-folder tool
Browse files- batch_split.py +108 -0
- clean_dataset.py +119 -0
- dataset_audio_long_0.5_3.0s/batch_001/260_BPM_Snare_Pattern_in_7_4_000.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/300_BPM_Snare_Pattern_in_7_4_000_MCompressor_setting_Master_LuxeVerb_TiledRoom_Weirdness01_.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/ANMLFarm_Kuh_Bulle_Agressiv_03_IOKA_NONE_Hofefeld.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Analog_Bass_Sweeps_and_FX_149.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Blast_Explosion.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/CUSTOM_No_Onion.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/C_2_Classic_Burgers.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Charon_C.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Cocky_Nice_try.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/DSGNImpt_Sci_Fimpact13_Whatley_IMPACTS.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Drum_Beat_002_FX_001_.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Drum_Beat_002_FX_006_v003_.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Dual_shaker_012.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Dual_shaker_015.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Europa_C.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Fair_poultry_Barn_chick_rost_various_001.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Fair_poultry_Barn_chick_rost_various_005.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Glitch_1_Syrin.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/I_R_Bedroom_2_Esperaza_Superlux_ECM_999_Vs_Soundman_OKM1.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Impact_Percs_with_Bass_and_fx_023.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Keys_C_139_.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Mauser_C96_Slide_Pull.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Mouse_Click_.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Objects_Coin_Roll_2.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Percussion_Improv_Coffee_Grinder_10_Roll.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Ray_Gun_A_01_A.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Snare_Processed_Oneshot_152_14_by_6.5_inch_Brass_Ludwig.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Solid_coffee_thermos_011.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/Surfer_dude_wanna_go_grab_some_drinks.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/UIMisc_Digital_Interface_Message_Selection_Confirmation_Alert_35_JW_Audio_User_Interface.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/UIMisc_Digital_Intreface_Data_Notification_Alert_01_JW_Audio_UI1.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/WD_40_Full_can_Rotation_shake_1.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/WD_40_Lightly_used_can_Tap_6.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/WD_40_less_than_half_Full_Fingernail_tap_center_of_bottle_45degree_incline_heled_at_cap_9_Snappy_.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/XO_Beat_2022_06_13_183751_160bpm_Minimal_Steady_Drive.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/carla_andamISpeakingSteve.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/crash_20_re_Sabian_Vault_Artisan_1.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/footsteps_shuffle_hardwood02.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/lam_sisme.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/metal_shop_foley_060.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/metal_shop_foley_098.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/mongool_vocal_hit.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/monologue_with_dl4_split_245.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/monologue_with_dl4_split_458.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/nina_andDoYouHaveClaimNumber.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/snares_by_CVLTIV8R_260.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/snares_by_CVLTIV8R_290.wav.mp3 +3 -0
- dataset_audio_long_0.5_3.0s/batch_001/snares_by_CVLTIV8R_298.wav.mp3 +3 -0
batch_split.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Split a flat audio-dataset directory into batch_XXX subfolders and rewrite
metadata.csv so each row's 'name' is prefixed with its batch directory."""
import os
import shutil
import pandas as pd
from pathlib import Path
import argparse
import csv

# ===================== CONFIGURATION ===================== #
parser = argparse.ArgumentParser(description="Batch split files and clean metadata.")
parser.add_argument("main_dir", type=str, help="Path to the main directory containing files.")
args = parser.parse_args()

MAIN_DIR = args.main_dir
CSV_PATH = f"{MAIN_DIR}/metadata.csv"
BATCH_SIZE = 9000  # maximum number of audio files per batch directory
# ========================================================== #

main_path = Path(MAIN_DIR)
csv_path = Path(CSV_PATH)

# ===================================================
# STEP 1 — Load metadata safely
# ===================================================
clean_rows = []
bad_rows = []  # rows whose column count does not match the header

with open(csv_path, "r", encoding="utf-8", errors="replace", newline="") as f:
    reader = csv.reader(f, delimiter=",", quotechar='"')
    header = next(reader)
    n_cols = len(header)
    clean_rows.append(header)

    for row in reader:
        # Strip null bytes and characters that are illegal in file names.
        row = [col.replace("\x00", "").replace("\\", "_").replace("/", "_").replace(":", "_")
               .replace("*", "_").replace("?", "_").replace("\"", "_").replace("<", "_")
               .replace(">", "_").replace("|", "_") for col in row]

        if len(row) == n_cols:
            clean_rows.append(row)
        else:
            bad_rows.append(row)

print(f"Header columns: {n_cols}")
print(f"Total valid rows: {len(clean_rows)-1}")
print(f"Bad rows skipped: {len(bad_rows)}")

# Convert CSV to list of dicts for easier manipulation.
data = [dict(zip(header, row)) for row in clean_rows[1:]]

# ----------------- Step 2: Remove files not in metadata ----------------- #
# The 'name' column carries no extension; every file on disk ends in ".mp3".
metadata_filenames = {row["name"] + ".mp3" for row in data}
actual_files = {f.name for f in main_path.iterdir() if f.is_file()}

# BUG FIX: never delete the metadata CSV itself — it is, by definition,
# not listed in its own 'name' column, but it is still needed on disk.
files_to_remove = actual_files - metadata_filenames - {csv_path.name}
for fname in files_to_remove:
    print(f"Removing unlisted file: {fname}")
    (main_path / fname).unlink()

# Refresh the on-disk view after deletions.
actual_files = {f.name for f in main_path.iterdir() if f.is_file()}

# ----------------- Step 3: Remove metadata rows where file does not exist ----------------- #
# BUG FIX: compare with the ".mp3" suffix appended — the 'name' column has no
# extension, so the original bare comparison silently dropped every row.
data = [row for row in data if row["name"] + ".mp3" in actual_files]

# ----------------- Step 4: Split into batches ----------------- #
# Exclude the metadata CSV so it is not moved into a batch directory.
sorted_files = sorted(actual_files - {csv_path.name})
file_to_batch = {}  # on-disk filename -> batch number
batch_num = 1

for i in range(0, len(sorted_files), BATCH_SIZE):
    batch_files = sorted_files[i:i + BATCH_SIZE]
    batch_dir = main_path / f"batch_{batch_num:03d}"
    batch_dir.mkdir(exist_ok=True)
    print(f"Creating {batch_dir} with {len(batch_files)} files")

    for fname in batch_files:
        src = main_path / fname
        dst = batch_dir / fname
        shutil.move(str(src), str(dst))
        file_to_batch[fname] = batch_num

    batch_num += 1

# ----------------- Step 5: Add batch prefix to metadata ----------------- #
for row in data:
    # BUG FIX: look up with the ".mp3" suffix so the key matches the
    # on-disk filenames recorded in file_to_batch.
    bn = file_to_batch.get(row["name"] + ".mp3")
    if bn is None:
        raise ValueError(f"Missing batch info for file: {row['name']}")
    row["name"] = f"batch_{bn:03d}/{row['name']}"

# ----------------- Step 6: Save cleaned CSV ----------------- #
output_csv = csv_path.with_name(csv_path.stem + "_clean.csv")

with open(output_csv, "w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=header)
    writer.writeheader()
    writer.writerows(data)

print(f"Number of lines in cleaned CSV: {len(data)}")
print(f"Cleaned CSV saved to: {output_csv}")
clean_dataset.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import csv
import re
import shutil
from pathlib import Path
import argparse

# ==========================
# Configuration
# ==========================

# Expected source layout: <dir_path>/batch_00x/*.mp3 plus <dir_path>/metadata.csv.
parser = argparse.ArgumentParser(description="Clean dataset filenames and metadata.")
parser.add_argument("dir_path", type=str, help="Path to the source directory containing batches and metadata.csv")
args = parser.parse_args()

dir_path = args.dir_path

SOURCE_ROOT = Path(dir_path)  # folder containing batch_00x + metadata.csv
CSV_NAME = "metadata.csv"

# Cleaned copies are written next to the source, in "<dir_path>_clean".
DEST_ROOT = Path(dir_path+"_clean")
DEST_CSV_NAME = "metadata.csv"

REPLACEMENT_CHAR = "_" # character to replace special characters with
def clean_filename(filename: str, replacement: str = "_") -> str:
    """
    Sanitize a filename's stem, keeping the extension untouched.

    Every stem character outside [A-Za-z0-9._] is replaced with
    *replacement*, runs of the replacement character are collapsed to a
    single occurrence, and an empty result falls back to "file".
    Only the final path component survives — directory parts are dropped
    by ``Path.stem``.
    """
    path = Path(filename)
    safe_stem = re.sub(r"[^A-Za-z0-9._]", replacement, path.stem)
    safe_stem = re.sub(re.escape(replacement) + "+", replacement, safe_stem)
    return (safe_stem or "file") + path.suffix
+
# ==========================
# Main process
# ==========================

def main():
    """Copy the dataset into DEST_ROOT with sanitized filenames and write a
    metadata CSV whose first column points at the renamed files.

    Reads SOURCE_ROOT/CSV_NAME (assumed header-less: each row starts with
    "batch_xxx/<filename>"), sanitizes every filename via clean_filename,
    copies the files batch by batch, and writes DEST_ROOT/DEST_CSV_NAME.
    """
    source_csv_path = SOURCE_ROOT / CSV_NAME
    dest_csv_path = DEST_ROOT / DEST_CSV_NAME

    DEST_ROOT.mkdir(parents=True, exist_ok=True)

    # Mapping: original "batch_xxx/name" -> cleaned basename (no directory).
    filename_map = {}
    rows = []

    with open(source_csv_path, newline="", encoding="utf-8") as f:
        reader = csv.reader(f)
        # NOTE(review): the CSV is treated as header-less; if a header row
        # exists it would be "cleaned" like a filename — confirm upstream.
        for row in reader:
            original_name = row[0]  # e.g. "batch_001/Some File.mp3"
            cleaned_name = clean_filename(original_name, REPLACEMENT_CHAR)

            filename_map[original_name] = cleaned_name

            new_row = row.copy()
            # clean_filename drops the directory part, so re-attach the
            # batch prefix taken from the original path.
            new_row[0] = original_name.split("/")[0] + "/" + cleaned_name
            rows.append(new_row)

    # Copy and rename files batch by batch.
    for batch_dir in SOURCE_ROOT.iterdir():
        if not batch_dir.is_dir():
            continue
        if not batch_dir.name.startswith("batch_"):
            continue

        dest_batch_dir = DEST_ROOT / batch_dir.name
        dest_batch_dir.mkdir(parents=True, exist_ok=True)

        for file_path in batch_dir.iterdir():
            if not file_path.is_file():
                continue

            # BUG FIX: build the lookup key with "/" explicitly. The CSV
            # always uses "/", while str(Path(...) / name) uses os.sep and
            # therefore never matched on Windows.
            key = f"{batch_dir.name}/{file_path.name}"

            if key not in filename_map:
                print(f"WARNING: {key} not found in CSV. Skipping.")
                continue

            cleaned_name = filename_map[key]
            dest_file_path = dest_batch_dir / cleaned_name

            shutil.copy2(file_path, dest_file_path)

    # Write cleaned CSV.
    with open(dest_csv_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerows(rows)

    print("Clean dataset created successfully:")
    print(f"  Folder: {DEST_ROOT}")
    print(f"  CSV: {dest_csv_path}")


if __name__ == "__main__":
    main()
dataset_audio_long_0.5_3.0s/batch_001/260_BPM_Snare_Pattern_in_7_4_000.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:79ab9a7b5037daf6b45a8a98c48945c111b3ac54684bdb8ad5684ae663ff7b3c
|
| 3 |
+
size 22936
|
dataset_audio_long_0.5_3.0s/batch_001/300_BPM_Snare_Pattern_in_7_4_000_MCompressor_setting_Master_LuxeVerb_TiledRoom_Weirdness01_.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7ab53ed74cdf4a9a25d5ca46e1a7752ad546420d9bf380050e99df76becd464d
|
| 3 |
+
size 21827
|
dataset_audio_long_0.5_3.0s/batch_001/ANMLFarm_Kuh_Bulle_Agressiv_03_IOKA_NONE_Hofefeld.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2168f7f586f9d6d256e80ebd337902918fc21eebf010f9d86432a21ae631a51a
|
| 3 |
+
size 12860
|
dataset_audio_long_0.5_3.0s/batch_001/Analog_Bass_Sweeps_and_FX_149.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d694cc2ae48908ed57c52533bee4795eeb714009242f169720c9b254551a4755
|
| 3 |
+
size 24306
|
dataset_audio_long_0.5_3.0s/batch_001/Blast_Explosion.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:812b6c5ab10f953615cee053b82286212f0665456e79f4d003dc8c400b201ab9
|
| 3 |
+
size 5540
|
dataset_audio_long_0.5_3.0s/batch_001/CUSTOM_No_Onion.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7cfa5b8c9d6ce621bb3b2f6d6a2c5135f8d5e6a60f37af361ad7a2ba6cbf4541
|
| 3 |
+
size 16791
|
dataset_audio_long_0.5_3.0s/batch_001/C_2_Classic_Burgers.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3166d833b143247326c13e979f950bc30f479bdb23604e18eef6a6edf761a647
|
| 3 |
+
size 12966
|
dataset_audio_long_0.5_3.0s/batch_001/Charon_C.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ad7d16659494e761b700e790392ecce7b153585a7eaa7e65a58504e850a117de
|
| 3 |
+
size 11336
|
dataset_audio_long_0.5_3.0s/batch_001/Cocky_Nice_try.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ef7999ab20a2dfbade32e2d08db0f1fa656ecaf4ba71131da634ff914b590d62
|
| 3 |
+
size 9032
|
dataset_audio_long_0.5_3.0s/batch_001/DSGNImpt_Sci_Fimpact13_Whatley_IMPACTS.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:714809ccefcbf2ee88f6c5fa8da7ad69d1c76724c670e28085435dec95154c7a
|
| 3 |
+
size 8803
|
dataset_audio_long_0.5_3.0s/batch_001/Drum_Beat_002_FX_001_.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1acfb053773e4847f9d4634b2daa7d38ead7f2d4642623a8217dd89d0f05f90e
|
| 3 |
+
size 17744
|
dataset_audio_long_0.5_3.0s/batch_001/Drum_Beat_002_FX_006_v003_.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d699c696dc65b70e707e6a259577ead02e859a615b23ca5c397ebb29c9735802
|
| 3 |
+
size 16212
|
dataset_audio_long_0.5_3.0s/batch_001/Dual_shaker_012.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d96c77c0a2a861660050d8788db7e864f4e8f022743f8f9d91a60dc8110ee2d0
|
| 3 |
+
size 10489
|
dataset_audio_long_0.5_3.0s/batch_001/Dual_shaker_015.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b3fa2562f98880bfd83fed46ea5c983bb8b3dac89a2444a95335c853bbbd3492
|
| 3 |
+
size 11993
|
dataset_audio_long_0.5_3.0s/batch_001/Europa_C.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aafee063e7976eb4fe34a0d1f1401317c280c6d4a18a7e4ceb80483c4bcbc365
|
| 3 |
+
size 10247
|
dataset_audio_long_0.5_3.0s/batch_001/Fair_poultry_Barn_chick_rost_various_001.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1a73ef7c137a7ffd9071f0326b52c245039dc17e72030fe37de5705fc1f21f66
|
| 3 |
+
size 19754
|
dataset_audio_long_0.5_3.0s/batch_001/Fair_poultry_Barn_chick_rost_various_005.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:00a6e462e5178534bb3c8b4ecbc93f1adb264eb2d93dded41c8e9a64009584e2
|
| 3 |
+
size 14758
|
dataset_audio_long_0.5_3.0s/batch_001/Glitch_1_Syrin.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1fb3b2da0646b5655d51c0bab77961b91e9b4b5bd898787863e37eb35679ad40
|
| 3 |
+
size 27440
|
dataset_audio_long_0.5_3.0s/batch_001/I_R_Bedroom_2_Esperaza_Superlux_ECM_999_Vs_Soundman_OKM1.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7ee86e7afc81250d6dea4ab06f0903ac1c78fda6f6492f39461e4bde8b958aca
|
| 3 |
+
size 14668
|
dataset_audio_long_0.5_3.0s/batch_001/Impact_Percs_with_Bass_and_fx_023.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7325e628575e273a612c09694de2e9dc3e279ece548f4806d61fb059a189fbf2
|
| 3 |
+
size 22013
|
dataset_audio_long_0.5_3.0s/batch_001/Keys_C_139_.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:63daedc9d5b4d43fbc10f142ad46da6384edaaac678489d02f8b3013960c51e4
|
| 3 |
+
size 8012
|
dataset_audio_long_0.5_3.0s/batch_001/Mauser_C96_Slide_Pull.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6ad4eee0e681f5faef0055e4470c0a477cfd0924e10dfde42c598b211fa259c4
|
| 3 |
+
size 8658
|
dataset_audio_long_0.5_3.0s/batch_001/Mouse_Click_.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ba193748408609c5651994014e8cf25a96d97bd87adca3803f1290055dfa29df
|
| 3 |
+
size 2936
|
dataset_audio_long_0.5_3.0s/batch_001/Objects_Coin_Roll_2.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:81ce8e1a627ee32c2b09afaef31ac6d9f2cdad68e3e3f771a614a822c41ea053
|
| 3 |
+
size 12435
|
dataset_audio_long_0.5_3.0s/batch_001/Percussion_Improv_Coffee_Grinder_10_Roll.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:82b2c58582b14d7aaeaf80fb7650e91e0141bd3ef5b63934766a5e8b22034870
|
| 3 |
+
size 13542
|
dataset_audio_long_0.5_3.0s/batch_001/Ray_Gun_A_01_A.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:308d62d1ff7b61040ae51b7a2dce0f3f191b33c0bc9414c7eb35c89cb12154d7
|
| 3 |
+
size 7379
|
dataset_audio_long_0.5_3.0s/batch_001/Snare_Processed_Oneshot_152_14_by_6.5_inch_Brass_Ludwig.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9b5623147c6ee096ce71af5ee513ec15cff337aee7082609feb1e0776a2d306a
|
| 3 |
+
size 15058
|
dataset_audio_long_0.5_3.0s/batch_001/Solid_coffee_thermos_011.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fd7530a859d33efa0390d547758d5b589768bb1e567f763be0338c22f677f36c
|
| 3 |
+
size 6409
|
dataset_audio_long_0.5_3.0s/batch_001/Surfer_dude_wanna_go_grab_some_drinks.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2b4563236a1b2006a6ddb8335ab79b332f174198c128452b1746c0a1a46e4367
|
| 3 |
+
size 14884
|
dataset_audio_long_0.5_3.0s/batch_001/UIMisc_Digital_Interface_Message_Selection_Confirmation_Alert_35_JW_Audio_User_Interface.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fae68f066c8e03b3b8d464a43966472c50e75952e3b1ba0d0584332b8d088a35
|
| 3 |
+
size 5470
|
dataset_audio_long_0.5_3.0s/batch_001/UIMisc_Digital_Intreface_Data_Notification_Alert_01_JW_Audio_UI1.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:07012cd459c4108ee2cb523b886f7d8b5ba9faf7deecbc4d9145926c292c4465
|
| 3 |
+
size 4799
|
dataset_audio_long_0.5_3.0s/batch_001/WD_40_Full_can_Rotation_shake_1.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:352b0db15927ff6887fe502e8d19b1aefc3b5278ef6240b01da32918d9512d5e
|
| 3 |
+
size 27288
|
dataset_audio_long_0.5_3.0s/batch_001/WD_40_Lightly_used_can_Tap_6.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a0a98c6b353f90563e1c3ce4d4ac153ebe0b5e911dae0495d57d57efcaaa9732
|
| 3 |
+
size 8479
|
dataset_audio_long_0.5_3.0s/batch_001/WD_40_less_than_half_Full_Fingernail_tap_center_of_bottle_45degree_incline_heled_at_cap_9_Snappy_.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f831b85de6925c9627631ae6b2052132fcbc5df4422151eef25657d681bc12ce
|
| 3 |
+
size 6761
|
dataset_audio_long_0.5_3.0s/batch_001/XO_Beat_2022_06_13_183751_160bpm_Minimal_Steady_Drive.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:55c660e742c434b0938a76649773edcb97cb4a082860c20ce730a353f03de95b
|
| 3 |
+
size 27462
|
dataset_audio_long_0.5_3.0s/batch_001/carla_andamISpeakingSteve.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:571314defc8e4d5833ce72791100b73972549e4c14a3c9b53b0a0425ce7e69c4
|
| 3 |
+
size 18510
|
dataset_audio_long_0.5_3.0s/batch_001/crash_20_re_Sabian_Vault_Artisan_1.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e7d8cab7bf934139a76b3835357ad4b57fe4afd6685bc6b716dd7412b1640bd3
|
| 3 |
+
size 5806
|
dataset_audio_long_0.5_3.0s/batch_001/footsteps_shuffle_hardwood02.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7b53e8b12cb837906bd2bc436a90413bce65e598e4a4f435d5117289f7c5a1bd
|
| 3 |
+
size 12736
|
dataset_audio_long_0.5_3.0s/batch_001/lam_sisme.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:78539e22f0c9116c1a98d9eaf7efaf6d9cece7d88006ae4fdc6d1034317172bf
|
| 3 |
+
size 15959
|
dataset_audio_long_0.5_3.0s/batch_001/metal_shop_foley_060.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:51dcabe8d752e0de79ef1fc24ddc49022c5b2ce137756494a283d02214644a6c
|
| 3 |
+
size 23428
|
dataset_audio_long_0.5_3.0s/batch_001/metal_shop_foley_098.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9fc04c64085dab4309ec2ffcbd6b06b1cd4b59ca1a25b874c5d217f441381436
|
| 3 |
+
size 10859
|
dataset_audio_long_0.5_3.0s/batch_001/mongool_vocal_hit.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f29de8280f6a2f949089620a356e871169829476242afc77cc27a49cd91bf6e5
|
| 3 |
+
size 12808
|
dataset_audio_long_0.5_3.0s/batch_001/monologue_with_dl4_split_245.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5c92d9ba494236b63164583fe368fa41e0e6e836173853a248995abebd27c84c
|
| 3 |
+
size 28268
|
dataset_audio_long_0.5_3.0s/batch_001/monologue_with_dl4_split_458.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:21b079da1a2b9ad94ad5f4f37b98e643ff1122fcc916ae712239084cf62319ad
|
| 3 |
+
size 23853
|
dataset_audio_long_0.5_3.0s/batch_001/nina_andDoYouHaveClaimNumber.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:84f5300bdf8ef69d5047810b3d9b58dc63d7f4bbbad34d92b537ab79747a614a
|
| 3 |
+
size 18798
|
dataset_audio_long_0.5_3.0s/batch_001/snares_by_CVLTIV8R_260.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ba2b156ad5c32b2eddf7e4d354ec7fdf5f8df1e67a628469a05d3d3a20891700
|
| 3 |
+
size 6188
|
dataset_audio_long_0.5_3.0s/batch_001/snares_by_CVLTIV8R_290.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6baf3581cc734ff07c144715905f509caf4117113b5cf17e91bbfa7623893f2c
|
| 3 |
+
size 8993
|
dataset_audio_long_0.5_3.0s/batch_001/snares_by_CVLTIV8R_298.wav.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:707ad16263d0c93e8de5a4d2e23a2c0387e1287ff7049b3e375e02953a815371
|
| 3 |
+
size 25244
|