Dataset columns:
    python_code: string, lengths 0 to 992k characters
    repo_name:   string, lengths 8 to 46 characters
    file_path:   string, lengths 5 to 162 characters

Each record below is a truncated python_code preview followed by its repo_name and file_path.
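For orientation, here is a minimal sketch of loading and inspecting a dataset with this three-column schema via the Hugging Face `datasets` library; the dataset id "user/python-code-corpus" is a placeholder assumption, not the real repository name.

from datasets import load_dataset

# Placeholder dataset id; substitute the actual repository name.
ds = load_dataset("user/python-code-corpus", split="train")

# Each record pairs a source file's text with its origin.
row = ds[0]
print(row["repo_name"], row["file_path"], len(row["python_code"]))

# Recompute the per-column string-length ranges listed above.
for col in ("python_code", "repo_name", "file_path"):
    lengths = [len(v) for v in ds[col]]
    print(col, min(lengths), max(lengths))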
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # pretrained = 'https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth' p...
ViT-Adapter-main
detection/configs/upgraded_mask_rcnn/mask_rcnn_mae_adapter_base_lsj_fpn_50ep_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] drop_path_rate = 0.4 model = dict( type='HybridTaskCascadeAug', backbone=dict( type='ViTAdapter', img_size=38...
ViT-Adapter-main
detection/configs/htc++/htc++_augreg_adapter_large_fpn_3x_coco_ms.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] # pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth' pretr...
ViT-Adapter-main
detection/configs/htc++/htc++_beitv2_adapter_large_fpn_3x_coco.py
_base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] NUM_CLASSES = 80 drop_path_rate = 0.3 # 0.4 (pre-train) -> 0.3 (fine-tune) # https://github.com/czczup/ViT-Adapter/releases/download/v0.3.1/htc++_beitv2_adapter_large_fpn_o365.pth load_fr...
ViT-Adapter-main
detection/configs/htc++/htc++_beitv2_adapter_large_fpn_o365_coco.py
_base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] NUM_CLASSES = 80 drop_path_rate = 0.3 # 0.4 (pre-train) -> 0.3 (fine-tune) model = dict( type='HybridTaskCascadeAug', backbone=dict( type='BEiTAdapter', img_size=2...
ViT-Adapter-main
detection/configs/htc++/htc++_beitv2_adapter_large_fpn_o365_coco_ms.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] model = dict( type='HybridTaskCascadeAug', backbone=dict( type='BEiTAdapter', img_size=224, patch_siz...
ViT-Adapter-main
detection/configs/htc++/htc++_beit_adapter_large_fpn_3x_coco_ms.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] # pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth' pretrain...
ViT-Adapter-main
detection/configs/htc++/htc++_beit_adapter_large_fpn_3x_coco_old.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] # pretrained = 'https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2...
ViT-Adapter-main
detection/configs/htc++/htc++_augreg_adapter_large_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] model = dict( type='HybridTaskCascadeAug', backbone=dict( type='BEiTAdapter', img_size=224, patch_siz...
ViT-Adapter-main
detection/configs/htc++/htc++_beitv2_adapter_large_fpn_3x_coco_ms.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] # pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth' pretrain...
ViT-Adapter-main
detection/configs/htc++/htc++_beit_adapter_large_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] pretrained = 'pretrained/uni-perceiver-large-L24-H1024-224size-pretrained_converted.pth' drop_path_rate = 0.4 model = dict( type=...
ViT-Adapter-main
detection/configs/htc++/htc++_uniperceiver_adapter_large_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] # pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth' pretrained = 'pretrained/deit_small_patch16...
ViT-Adapter-main
detection/configs/atss/atss_deit_adapter_small_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] # pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth' pretrained = 'pretrained/deit_small_patch16...
ViT-Adapter-main
detection/configs/gfl/gfl_deit_adapter_small_fpn_3x_coco.py
# ------------------------------------------------------------------------------------------------ # Deformable DETR # Copyright (c) 2020 SenseTime. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # -------------------------------------------------------------------------...
ViT-Adapter-main
detection/ops/test.py
# ------------------------------------------------------------------------------------------------ # Deformable DETR # Copyright (c) 2020 SenseTime. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # -------------------------------------------------------------------------...
ViT-Adapter-main
detection/ops/setup.py
# ------------------------------------------------------------------------------------------------ # Deformable DETR # Copyright (c) 2020 SenseTime. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # -------------------------------------------------------------------------...
ViT-Adapter-main
detection/ops/functions/ms_deform_attn_func.py
# ------------------------------------------------------------------------------------------------ # Deformable DETR # Copyright (c) 2020 SenseTime. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # -------------------------------------------------------------------------...
ViT-Adapter-main
detection/ops/functions/__init__.py
# ------------------------------------------------------------------------------------------------ # Deformable DETR # Copyright (c) 2020 SenseTime. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # -------------------------------------------------------------------------...
ViT-Adapter-main
detection/ops/modules/ms_deform_attn.py
# ------------------------------------------------------------------------------------------------ # Deformable DETR # Copyright (c) 2020 SenseTime. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # -------------------------------------------------------------------------...
ViT-Adapter-main
detection/ops/modules/__init__.py
import torch from qwen.model import QwenVL #usage img = torch.randn(1, 3, 256, 256) caption = torch.randint(0, 20000, (1, 1024)) model = QwenVL() output = model(img, caption) print(output.shape)
Qwen-VL-main
example.py
from qwen.inference import QwenVLChat qwen_chat = QwenVLChat(model_name="Qwen/Qwen-VL-Chat", device_map="cuda") response = qwen_chat.chat([ {"image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"}, {"text": "What is this?"} ]) print(response) response = qwen_chat.chat("Draw a box around the high-five in the image") print(...
Qwen-VL-main
inference.py
from qwen.model import QwenVL, QwenVLTokenizer from qwen.train import CFG, Train from qwen.inference import QwenVLChat
Qwen-VL-main
qwen/__init__.py
import torch import torch.nn as nn from transformers import AutoTokenizer, CLIPProcessor from qwen.transformer import ( Decoder, Encoder, Transformer, ViTransformerWrapper, AutoregressiveWrapper ) class QwenVLTokenizer: def __init__(self): try: self.processor = CLIPProce...
Qwen-VL-main
qwen/model.py
from functools import partial from typing import Optional import torch from torch import nn, einsum, Tensor import torch.nn.functional as F from collections import namedtuple from functools import wraps from packaging import version from dataclasses import dataclass from einops import rearrange # constants Efficie...
Qwen-VL-main
qwen/attend.py
import torch # This is the unfused version of StableAdamW. It is slower than the fused version (coming). class StableAdamWUnfused(torch.optim.Optimizer): def __init__( self, params, lr=0.002, weight_decay=0.2, betas=(0.9, 0.99), eps=1e-8, clip_thresh=1.0, ...
Qwen-VL-main
qwen/utils.py
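The StableAdamWUnfused cell above is truncated after its constructor; as a usage note, here is a minimal sketch under the visible defaults (lr=0.002, weight_decay=0.2), assuming the qwen/utils.py import path shown above:

import torch
import torch.nn as nn
from qwen.utils import StableAdamWUnfused

model = nn.Linear(10, 10)
# Defaults copied from the visible constructor signature.
opt = StableAdamWUnfused(model.parameters(), lr=0.002, weight_decay=0.2)

loss = model(torch.randn(2, 10)).sum()
loss.backward()
opt.step()
opt.zero_grad()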
import math from dataclasses import dataclass from functools import partial, wraps from inspect import isfunction # constants from math import ceil from random import random from typing import Callable, List, Optional import torch import torch.nn.functional as F from einops import pack, rearrange, reduce, repeat, unp...
Qwen-VL-main
qwen/transformer.py
import math import multiprocessing import os from datetime import timedelta from functools import partial from itertools import chain import torch ########### SETUP CONFIG import torch.distributed as dist from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.state import Acceler...
Qwen-VL-main
qwen/train.py
from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation import GenerationConfig import torch class QwenVLChat: def __init__(self, model_name, device_map="cuda", trust_remote_code=True, bf16=False, ...
Qwen-VL-main
qwen/inference.py
import torch from cm3.model import CM3 #usage img = torch.randn(1, 3, 256, 256) caption = torch.randint(0, 20000, (1, 1024)) model = CM3() output = model(img, caption) print(output.shape) # (1, 1024, 20000)
CM3Leon-main
example.py
from cm3.model import CM3Tokenizer, CM3
CM3Leon-main
cm3/__init__.py
import logging import torch from torch import nn from torch.nn import Module from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import AutoTokenizer, CLIPProcessor from zeta.nn.architecture.transformer import Decoder, Encoder, Transformer, ViTransformerWrapper from zeta.nn.arch...
CM3Leon-main
cm3/model.py
CM3Leon-main
cm3/transformer.py
import math import multiprocessing import os from datetime import timedelta from functools import partial from itertools import chain import torch from torch.distributed.fsdp import ( FullyShardedDataParallel, MixedPrecision, BackwardPrefetch, ShardingStrategy, ) from accelerate import Accelerator from...
CM3Leon-main
cm3/train.py
import multiprocessing import argparse from itertools import chain from datasets import load_dataset from cm3.model import CM3LEONTokenizer class CFG: SEED: int = 42 SEQ_LEN: int = 8192 NUM_CPU: int = multiprocessing.cpu_count() HF_ACCOUNT_REPO: str = "YOUR_HUGGINGFACE_ACCOUNT/REPO_NAME" DATASET_NAME: str =...
CM3Leon-main
cm3/tokenize.py
import torch # This is the unfused version of StableAdamW. It is slower than the fused version (coming). class StableAdamWUnfused(torch.optim.Optimizer): def __init__( self, params, lr=0.002, weight_decay=0.2, betas=(0.9, 0.99), eps=1e-8, clip_thresh=1.0, ...
CM3Leon-main
cm3/utils/stable_adamw.py
CM3Leon-main
cm3/utils/__init__.py
import unittest import torch from softmax_one.softmax_one import ScaledDotProductAttention class TestScaledDotProductAttention(unittest.TestCase): def setUp(self): self.module = ScaledDotProductAttention(dropout=0.1) self.q = torch.rand(16, 10, 64) #16 batches 10 queries of size 64 self.k ...
AttentionIsOFFByOne-main
test.py
import torch from softmax_one.softmax_one import softmax_one x = torch.randn(5) y = softmax_one(x, dim=0) print(y) print(y.shape)
AttentionIsOFFByOne-main
example.py
import time import torch import argparse import torch.nn.functional as F import matplotlib.pyplot as plt # from softmax_one.softmax_one_cupy import softmax_one_cupy as softmax_one from softmax_one.softmax_one import softmax_one import numpy as np import logging def benchmark(func, x, dim): start = time.time() ...
AttentionIsOFFByOne-main
tests/benchmark.py
from setuptools import setup, Extension from torch.utils import cpp_extension softmax_one_cpp = Extension( name="softmax_one_cpp", sources=["softmax_one/optimized/softmax_one.cpp", "softmax_one/optimized/binding.cpp"], include_dirs=["softmax_one/include"], extra_compile_args=["-std=c++14"] ) setup( ...
AttentionIsOFFByOne-main
tests/setup.py
import math import torch import torch.nn as nn import torch.nn.functional as F from softmax_one.softmax_one import softmax_one # QuietAttention class QuietAttention(nn.Module): def __init__(self, dropout=0.0): super().__init__() self.dropout = nn.Dropout(dropout) def forward(self, q, k, v, ma...
AttentionIsOFFByOne-main
softmax_one/attention.py
import math import torch import torch.nn.functional as F # Define the softmax_one function with an added one in the denominator, which helps to reduce # the negative impact of tiny values in the softmax function and improves numerical stability def softmax_one(x, dim=None, _stacklevel=3, dtype=None): #subtract...
AttentionIsOFFByOne-main
softmax_one/softmax_one.py
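The softmax_one cell above is truncated, so here is a minimal self-contained sketch reconstructed from its visible comments (an ordinary softmax with an extra one in the denominator, stabilized by subtracting the max); this is an illustration, not the repository's exact code.

import torch

def softmax_one(x: torch.Tensor, dim: int = -1) -> torch.Tensor:
    # Subtract the per-dim max so the exponentials cannot overflow.
    x = x - x.max(dim=dim, keepdim=True).values
    exp_x = torch.exp(x)
    # The extra 1 in the denominator lets all weights shrink toward zero.
    return exp_x / (1 + exp_x.sum(dim=dim, keepdim=True))

x = torch.randn(5)
print(softmax_one(x, dim=0))  # entries sum to less than 1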
from softmax_one.softmax_one import softmax_one, ScaledDotProductAttention
AttentionIsOFFByOne-main
softmax_one/__init__.py
# CuPy compiles raw Python code into CUDA kernels; this is a test import cupy as cp #softmax def softmax_one_cupy(x, axis=None): #subtract the max for stability x = x - cp.max(x, axis=axis, keepdims=True) #compute exponentials exp_x = cp.exp(x) #compute the softmax values and add one in the de...
AttentionIsOFFByOne-main
optimized/softmax_one_cupy.py
import json from datetime import datetime import re def dmy_to_ymd(d): return datetime.strptime(d, '%d %b %Y').strftime('%Y-%m-%d') with open('../README.md', 'r') as f: lines = f.readlines() # remove empty lines lines = [line.strip() for line in lines if line.strip()] st = lines.index('# Resources...
EXA-1-master
exa/papers/Awesome-Diffusion-Models/website/convert_resource.py
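The dmy_to_ymd helper in the cell above converts dates such as "01 Jan 2023" to ISO form; a quick worked check, copied from the visible definition:

from datetime import datetime

def dmy_to_ymd(d):
    return datetime.strptime(d, '%d %b %Y').strftime('%Y-%m-%d')

print(dmy_to_ymd('01 Jan 2023'))  # -> 2023-01-01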
import json from datetime import datetime def dmy_to_ymd(d): return datetime.strptime(d, '%d %b %Y').strftime('%Y-%m-%d') with open('../README.md', 'r') as f: lines = f.readlines() # remove empty lines lines = [line.strip() for line in lines if line.strip()] idx = lines.index('# Papers') line...
EXA-1-master
exa/papers/Awesome-Diffusion-Models/website/convert.py
import itertools from jinja2 import Template import json DOC_DIR = '../docs' class Link: def __init__(self, name, href): self.name = name self.href = href class Paper: def __init__(self, data): self.title = data['title'] self.authors = data['authors'] self.source = ...
EXA-1-master
exa/papers/Awesome-Diffusion-Models/website/main.py
# -*- coding: utf-8 -*- import argparse import logging import pprint from gensim.models import word2vec logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) parser = argparse.ArgumentParser(description='gensim skip-gram with negative sampling') parser.add_argument('--is_train',...
EXA-1-master
exa/papers/awesome-embedding-models/examples/baseline.py
# -*- coding: utf-8 -*- import os import zipfile from keras.utils.data_utils import get_file def maybe_download(url): """ Download a file if not present. """ filename = url.split('/')[-1] path = get_file(filename, url) return path def read_data(filename): """ Extract the first file ...
EXA-1-master
exa/papers/awesome-embedding-models/examples/utils.py
# -*- coding: utf-8 -*- import pprint from keras.utils.data_utils import get_file from keras.utils import np_utils from keras.preprocessing.text import Tokenizer, base_filter from keras.preprocessing.sequence import skipgrams from keras.models import Sequential from keras.layers import Dense from gensim.models.doc2vec ...
EXA-1-master
exa/papers/awesome-embedding-models/examples/skip-gram.py
# -*- coding: utf-8 -*- import argparse import sys import numpy as np from gensim.models import word2vec from gensim.models.doc2vec import Word2Vec from keras.layers import Activation, Embedding, Merge, Reshape from keras.models import Sequential from keras.preprocessing.sequence import skipgrams, make_sampling_table ...
EXA-1-master
exa/papers/awesome-embedding-models/examples/skip-gram_with_ns.py
# -*- coding:utf-8 -*- import re from pprint import pprint import requests from bs4 import BeautifulSoup def get_html(url): html = '' try: html = requests.get(url).text except Exception as e: print('web requests url error: {}\nlink: {}'.format(e, url)) return html class WebDownloader(object): ...
EXA-1-master
exa/papers/Awesome-Multimodal-Research-master/scripts/WebDownloader.py
# -*- coding:utf-8 -*- import re import requests import urllib.request import os import argparse parser = argparse.ArgumentParser(description="pull_paper") parser.add_argument('--keyword', type=str, default='Multimodal') # keyword used to match the papers we want to pull args = parser.parse_args() # get web context r =...
EXA-1-master
exa/papers/Awesome-Multimodal-Research-master/scripts/pull_paper.py
import os import pandas as pd from tqdm import tqdm BASE_URL="https://archive.org/download/stackexchange/" table = pd.read_html(BASE_URL)[0] sources = [x.replace(" (View Contents)", "") for x in table['Name'].tolist()] sources = [x for x in sources if x.endswith(".7z")] for source in tqdm(sources): # if ".meta." ...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/download.py
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/__init__.py
import os import json LEMMA_DATA_DIR_SE_OUT = os.environ.get("LEMMA_DATA_DIR_SE_OUT", "./data/") if __name__ == "__main__": with open(os.path.join(LEMMA_DATA_DIR_SE_OUT,"token_counts", "tokens.json"), "r") as f: counts = json.load(f) ''' print a table of the counts ''' print("|Idx|Site|Tok...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/print_stats.py
import os import json import tiktoken from multiprocessing import Pool from transformers import AutoTokenizer # enc = tiktoken.get_encoding("r50k_base") enc = AutoTokenizer.from_pretrained( "EleutherAI/pythia-6.9b-deduped", # "gpt2" ) def get_token_count(qa_pair): # return len(enc.encode(qa_pair['text'])) ...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/token_count.py
import os import json import sys import xml.etree.ElementTree as ET from tqdm import tqdm sys.path.append("./") from src.stack_exchange.count import get_sites_count LEMMA_DATA_DIR_SE = os.environ.get("LEMMA_DATA_DIR_SE", "./data/") if os.path.exists(os.path.join(LEMMA_DATA_DIR_SE, "counts.json")): with open(os.pa...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/filter.py
import re import os import sys import json import fasttext from bs4 import BeautifulSoup from multiprocessing import Pool sys.path.append("./") site_name = "" CLEANR = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});') def cleanhtml(raw_html): raw_html = raw_html.replace("<li>", "\n*") raw_html = ...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/post_processing.py
import os import json from tqdm import tqdm import xml.etree.ElementTree as ET LEMMA_DATA_DIR_SE = os.environ.get("LEMMA_DATA_DIR_SE", "./data/stack_exchange/") def get_sites_count(path=LEMMA_DATA_DIR_SE): sites = os.listdir(path) sites = [x for x in sites if x.endswith(".xml")] counts = {} for site i...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/count.py
import argparse from datasets import load_dataset import pathlib parser = argparse.ArgumentParser() parser.add_argument("--data_dir", type=str, default=None, help="Path to the wikipedia data directory.") args = parser.parse_args() LANGUAGES = [ "bg", "ca", "cs", "da", "de", "en", "es", "fr", "...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/wiki/download.py
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/wiki/__init__.py
import os import json from multiprocessing import Pool from transformers import AutoTokenizer print("start loading!") enc = AutoTokenizer.from_pretrained( "EleutherAI/pythia-6.9b-deduped", ) print("end loading!") def get_token_count(qa_pair): return len(enc.tokenize(qa_pair['text'])) LEMMA_DATA_DIR_SE_OUT = "....
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/wiki/token_count.py
import os import json LEMMA_DATA_DIR_SE_OUT = "./data/wikipedia/" LEMMA_DATA_SAVE_DIR = "./data/wikipedia/wiki-full.jsonl" files = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE_OUT)) if os.path.isfile(os.path.join(LEMMA_DATA_DIR_SE_OUT, x))] files.sort() with open(LEMMA_DATA_SAVE_DIR, "w") as fw: for fil...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/wiki/convert_format.py
import argparse import hashlib import gzip import json import re import uuid from datetime import datetime from typing import Dict, Union import pathlib parser = argparse.ArgumentParser() parser.add_argument('--input', type=str, default=None) parser.add_argument('--target_dir', type=str, default="...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/github/github_clean_dedup_local.py
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/github/__init__.py
import argparse from datetime import datetime import json import multiprocessing as mp import os import gzip from transformers import AutoTokenizer import pathlib parser = argparse.ArgumentParser() parser.add_argument('--data_file', type=str, default=None) parser.add_argument('--target_dir', type=str, default=None) a...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/github/github_run_filter.py
import argparse import os from transformers import AutoTokenizer import json import multiprocessing as mp import pathlib from datetime import datetime parser = argparse.ArgumentParser() parser.add_argument('--data_file', type=str, default=None) args = parser.parse_args() tokenizer = AutoTokenizer.from_pretrained("Ele...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/github/github_token_count.py
import argparse import json from datetime import datetime from typing import Dict import pathlib parser = argparse.ArgumentParser() parser.add_argument('--first_step_dir', type=str, default=None) parser.add_argument('--target_dir', type=str, default=None) args = parser.parse_args() def get_timestamp() -> str: r...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/github/github_global_dedup.py
import argparse import json from datetime import datetime import pathlib parser = argparse.ArgumentParser() parser.add_argument( '--first_step_dir', type=str, default="./data/github/processed_v3" ) parser.add_argument( '--input', type=str, default="data/github/processed_v3/run_ce60fbbc14684ed8b6590548...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/github/github_merge_dedup.py
from datasets import load_dataset book_dataset = load_dataset("the_pile_books3") for split, dataset in book_dataset.items(): dataset.to_json(f"./data/book/books3-{split}.jsonl") pg19_dataset = load_dataset("pg19") for split, dataset in pg19_dataset.items(): dataset.to_json(f"./data/book/pg19-{split}.jsonl")
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/book/download.py
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/book/__init__.py
# Copyright 2023 Ontocord.ai, Together Computer, ETH Zürich, Stanford University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unles...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/book/dedup.py
import os import json from multiprocessing import Pool from transformers import AutoTokenizer enc = AutoTokenizer.from_pretrained( "EleutherAI/pythia-6.9b-deduped", ) def get_token_count(qa_pair): return len(enc.tokenize(qa_pair['text'])) LEMMA_DATA_DIR_SE_OUT = "./data/book/" sites = [x for x in os.listdir(o...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/book/token_count.py
import argparse from datetime import datetime import json import gzip import os import pathlib import joblib from joblib import Parallel, delayed parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, default="./data/c4/en") parser.add_argument('--output_dir', type=str, default="./data/c4/proce...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/c4/c4_reformat.py
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/c4/__init__.py
import argparse import boto3 from botocore.exceptions import ClientError import configparser import itertools import numpy as np import pathlib parser = argparse.ArgumentParser() parser.add_argument('--aws_config', type=str, help='aws config file') parser.add_argument('--target_dir', type=str, default="./data/arxiv") ...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/arxiv/run_download.py
import concurrent.futures from datetime import datetime import fasttext import json import pathlib import tarfile from typing import List, Tuple, Dict, Union import gzip import tempfile import uuid import re from utils import predict_lang, get_timestamp, format_arxiv_id # suppress fasttext warning fasttext.FastText.e...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/arxiv/arxiv_cleaner.py
from datetime import datetime import fasttext import re from typing import List, Tuple def get_timestamp() -> str: return datetime.now().isoformat() def predict_lang( text: str, lang_model: fasttext.FastText._FastText, k=5 ) -> Tuple[List[str], List[float]]: r""" Predict top-k languages of text. ...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/arxiv/utils.py
import argparse import os from collections import defaultdict from datetime import datetime from transformers import AutoTokenizer import json import multiprocessing as mp import pathlib import pandas as pd from tabulate import tabulate parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, def...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/arxiv/token_count.py
import argparse import os import uuid import numpy as np import pathlib import tempfile from typing import List import joblib from arxiv_cleaner import ArxivCleaner parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, default="./data/arxiv/src") parser.add_argument('--target_dir', type=str,...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/arxiv/run_clean.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from pathlib import Path from setuptools import setup # type: ignore setup( name="cc_net", version="1.0.0", pa...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/setup.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # """ Main script to download a CC dump, remove duplicates, split by language and filter the documents. The pipeline parameters are described...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/mine.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # """ Creates mono-lingual corpus from Wikipedia. """ import functools import re import subprocess import urllib.request from pathlib import ...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/get_wiki_cirrus.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # """ Manipulate files containing one json per line. """ import argparse import collections import contextlib import functools import glob imp...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/jsonql.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import functools import itertools import logging import os import sys import time import warnings from pathlib import Path from typing impor...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/execution.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import sys import time import warnings from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type import numpy as np HASH_TYPE: T...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/flat_hash_set.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import base64 import hashlib import itertools import urllib.parse from pathlib import Path from typing import Dict, Iterable, List, Optional...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/minify.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import re import unicodedata UNICODE_PUNCT = { ",": ",", "。": ".", "、": ",", "„": '"', "”": '"', "“": '"', "«":...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/text_normalizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import logging import subprocess from pathlib import Path from typing import List import func_argparse import numpy as np from cc_net impo...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/regroup.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import argparse import time from pathlib import Path from typing import Dict, List, Optional, Sequence, Tuple, Union import kenlm # type: ...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/perplexity.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import time from typing import Dict, Optional import sacremoses # type: ignore from cc_net import jsonql, text_normalizer class RobustT...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/tokenizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # """ Tools to remove duplicate paragraphs across one or several shards. """ import argparse import gc import hashlib import logging import m...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/dedup.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import contextlib import functools import logging import re import tempfile import time import urllib.request from pathlib import Path from ...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/process_wet_file.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import func_argparse import cc_net.mine def main(): func_argparse.parse_and_call(cc_net.mine.get_main_parser()) if __name__ == "__...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/__main__.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import collections from pathlib import Path from typing import Dict, Optional import fasttext # type: igno...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/split_by_lang.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import contextlib import functools import gzip import logging import multiprocessing from collections import defaultdict from pathlib import...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/tools/dl_cc_100.py
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/tools/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # """ This code is used to train a fastText classifier to label document with DMOZ categories. The data, distributed under the cc-by 3.0 lice...
EXA-1-master
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/tools/make_dmoz_corpus.py