| python_code | repo_name | file_path |
|---|---|---|
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='STDCContextPathNet',
backbone_cfg=dict(
type='STDCNet',
stdc_type='STDCNet1',
in_channels=3,
channels=(32, 64, 256, 512, 1... | ViT-Adapter-main | segmentation/configs/_base_/models/stdc.py |
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=160000)
checkpoint_config = dict(by_epoch=False, int... | ViT-Adapter-main | segmentation/configs/_base_/schedules/schedule_160k.py |
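The five schedule rows in this table (160k/80k/320k/40k/20k) are identical except for `max_iters`, and every one truncates at `checkpoint_config`. A minimal sketch of a complete MMSegmentation iteration-based schedule, assuming typical checkpoint/evaluation settings (the elided values are not recoverable from these rows):

```python
# Sketch of a full iteration-based schedule; intervals below are assumptions.
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
runner = dict(type='IterBasedRunner', max_iters=160000)
checkpoint_config = dict(by_epoch=False, interval=16000)  # assumed interval
evaluation = dict(interval=16000, metric='mIoU')          # assumed interval/metric
```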
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=80000)
checkpoint_config = dict(by_epoch=False, inte... | ViT-Adapter-main | segmentation/configs/_base_/schedules/schedule_80k.py |
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=320000)
checkpoint_config = dict(by_epoch=False, int... | ViT-Adapter-main | segmentation/configs/_base_/schedules/schedule_320k.py |
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=40000)
checkpoint_config = dict(by_epoch=False, inte... | ViT-Adapter-main | segmentation/configs/_base_/schedules/schedule_40k.py |
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=20000)
checkpoint_config = dict(by_epoch=False, inte... | ViT-Adapter-main | segmentation/configs/_base_/schedules/schedule_20k.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_cocostuff.py',
'../_base_/datasets/coco-stuff10k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
crop_size = (512, 512)
# pretrained = 'https://conversationhub.blob.core.windo... | ViT-Adapter-main | segmentation/configs/coco_stuff10k/mask2former_beit_adapter_base_512_40k_cocostuff10k_ms.py |
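Each config row like the one above composes its `_base_` fragments at load time; mmcv's `Config.fromfile` performs the merge. A sketch using the public mmcv API (the path is the `file_path` of the row above):

```python
from mmcv import Config

cfg = Config.fromfile(
    'segmentation/configs/coco_stuff10k/'
    'mask2former_beit_adapter_base_512_40k_cocostuff10k_ms.py')
# Model, dataset, runtime, and schedule fragments are merged first, then the
# child file's own keys (e.g. crop_size) override the inherited ones.
print(cfg.runner)     # {'type': 'IterBasedRunner', 'max_iters': 40000}
print(cfg.crop_size)  # (512, 512), set directly in the child config above
```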
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_beit.py',
'../_base_/datasets/coco-stuff10k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (512, 512)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-sh... | ViT-Adapter-main | segmentation/configs/coco_stuff10k/upernet_beit_adapter_large_512_80k_cocostuff10k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_cocostuff.py',
'../_base_/datasets/coco-stuff10k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
crop_size = (512, 512)
# pretrained = 'https://conversationhub.blob.core.windo... | ViT-Adapter-main | segmentation/configs/coco_stuff10k/mask2former_beit_adapter_large_512_40k_cocostuff10k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_beit.py',
'../_base_/datasets/coco-stuff10k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (512, 512)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-sh... | ViT-Adapter-main | segmentation/configs/coco_stuff10k/upernet_beit_adapter_large_512_80k_cocostuff10k_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_cocostuff.py',
'../_base_/datasets/coco-stuff10k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
crop_size = (512, 512)
# pretrained = 'https://conversationhub.blob.core.windo... | ViT-Adapter-main | segmentation/configs/coco_stuff10k/mask2former_beit_adapter_large_512_40k_cocostuff10k_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_cocostuff.py',
'../_base_/datasets/coco-stuff10k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
crop_size = (512, 512)
# pretrained = 'https://conversationhub.blob.core.windo... | ViT-Adapter-main | segmentation/configs/coco_stuff10k/mask2former_beit_adapter_base_512_40k_cocostuff10k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share... | ViT-Adapter-main | segmentation/configs/ade20k/mask2former_beit_adapter_large_896_80k_ade20k_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'
pretrained = 'pret... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_deit_adapter_small_512_160k_ade20k.py |
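The ADE20K rows here reuse the same UperNet base and differ mainly in the `pretrained` checkpoint. The same override can also be applied without editing the file, via mmcv's documented dotted-key merge (the `--cfg-options`/`DictAction` machinery visible in the train/test scripts further down this table); a sketch, with the checkpoint name taken from the commented-out DeiT URL in the row above:

```python
from mmcv import Config

cfg = Config.fromfile(
    'segmentation/configs/ade20k/upernet_deit_adapter_small_512_160k_ade20k.py')
# Dotted keys address nested config fields.
cfg.merge_from_dict(
    {'model.pretrained': 'pretrained/deit_small_patch16_224-cd65a155.pth'})
```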
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-shar... | ViT-Adapter-main | segmentation/configs/ade20k/mask2former_beit_adapter_large_640_160k_ade20k_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-pu... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_beit_large_512_160k_ade20k_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
pretrained = 'pretrained/uni-perceiver-large-L24-H1024-224size-pretrained_converted.pth'
model = dict(
... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_uniperceiver_adapter_large_512_160k_ade20k.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share... | ViT-Adapter-main | segmentation/configs/ade20k/mask2former_beitv2_adapter_large_896_80k_ade20k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# pretrained = 'https://github.com/czczup/ViT-Adapter/releases/download/v0.1.6/L_16-i21k-300ep-lr_0.001-aug... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_augreg_adapter_large_512_160k_ade20k.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-pu... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_beit_adapter_large_640_160k_ade20k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'
pretrained = 'pretr... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_deit_adapter_light_base_512_160k_ade20k.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'
pretrained = 'pretr... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_deit_adapter_base_512_160k_ade20k.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-pu... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_beit_adapter_large_640_160k_ade20k_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# pretrained = 'https://github.com/czczup/ViT-Adapter/releases/download/v0.3.1/B_16-i21k-300ep-lr_0.001-aug... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_augreg_adapter_base_512_160k_ade20k.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share... | ViT-Adapter-main | segmentation/configs/ade20k/mask2former_beit_adapter_large_896_80k_ade20k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'
pretrained = 'pretr... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_deit_adapter_tiny_512_160k_ade20k.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-shar... | ViT-Adapter-main | segmentation/configs/ade20k/mask2former_beitv2_adapter_large_896_160k_ade20k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-shar... | ViT-Adapter-main | segmentation/configs/ade20k/mask2former_beit_adapter_large_640_160k_ade20k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-pu... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_beit_large_512_160k_ade20k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# pretrained = 'https://github.com/czczup/ViT-Adapter/releases/download/v0.3.1/Ti_16-i21k-300ep-lr_0.001-au... | ViT-Adapter-main | segmentation/configs/ade20k/upernet_augreg_adapter_tiny_512_160k_ade20k.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit.py',
'../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share... | ViT-Adapter-main | segmentation/configs/ade20k/mask2former_beitv2_adapter_large_896_80k_ade20k_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_potsdam.py',
'../_base_/datasets/potsdam.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (512, 512)
# pretrained = 'https://conversationhub.blob.core.windows.net/b... | ViT-Adapter-main | segmentation/configs/potsdam/mask2former_beit_adapter_large_512_80k_potsdam_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_chase_db1.py',
'../_base_/datasets/chase_db1.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
crop_size = (128, 128)
img_scale = (960, 999)
# pretrained = 'https://conversation... | ViT-Adapter-main | segmentation/configs/chase_db1/mask2former_beit_adapter_large_128_40k_chase_db1_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_cocostuff.py',
'../_base_/datasets/coco-stuff164k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core.wind... | ViT-Adapter-main | segmentation/configs/coco_stuff164k/mask2former_beitv2_adapter_large_896_80k_cocostuff164k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_cocostuff.py',
'../_base_/datasets/coco-stuff164k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core.wind... | ViT-Adapter-main | segmentation/configs/coco_stuff164k/mask2former_beit_adapter_large_896_80k_cocostuff164k_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_beit.py',
'../_base_/datasets/coco-stuff164k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (640, 640)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-s... | ViT-Adapter-main | segmentation/configs/coco_stuff164k/upernet_beit_adapter_large_640_80k_cocostuff164k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_beit.py',
'../_base_/datasets/coco-stuff164k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (640, 640)
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-s... | ViT-Adapter-main | segmentation/configs/coco_stuff164k/upernet_beit_adapter_large_640_80k_cocostuff164k_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_cocostuff.py',
'../_base_/datasets/coco-stuff164k.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core.wind... | ViT-Adapter-main | segmentation/configs/coco_stuff164k/mask2former_beit_adapter_large_896_80k_cocostuff164k_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_beit.py',
'../_base_/datasets/pascal_context_59.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (480, 480)
img_scale = (520, 520)
# pretrained = 'https://conversationhub.bl... | ViT-Adapter-main | segmentation/configs/pascal_context/upernet_beit_adapter_large_480_80k_pascal_context_59_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_pascal.py',
'../_base_/datasets/pascal_context_59.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
crop_size = (480, 480)
img_scale = (520, 520)
# pretrained = 'https://convers... | ViT-Adapter-main | segmentation/configs/pascal_context/mask2former_beit_adapter_base_480_40k_pascal_context_59_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_pascal.py',
'../_base_/datasets/pascal_context_59.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
crop_size = (480, 480)
img_scale = (520, 520)
# pretrained = 'https://convers... | ViT-Adapter-main | segmentation/configs/pascal_context/mask2former_beit_adapter_large_480_40k_pascal_context_59_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_pascal.py',
'../_base_/datasets/pascal_context_59.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
crop_size = (480, 480)
img_scale = (520, 520)
# pretrained = 'https://convers... | ViT-Adapter-main | segmentation/configs/pascal_context/mask2former_beit_adapter_base_480_40k_pascal_context_59_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_pascal.py',
'../_base_/datasets/pascal_context_59.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
crop_size = (480, 480)
img_scale = (520, 520)
# pretrained = 'https://convers... | ViT-Adapter-main | segmentation/configs/pascal_context/mask2former_beit_adapter_large_480_40k_pascal_context_59_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/upernet_beit.py',
'../_base_/datasets/pascal_context_59.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (480, 480)
img_scale = (520, 520)
# pretrained = 'https://conversationhub.bl... | ViT-Adapter-main | segmentation/configs/pascal_context/upernet_beit_adapter_large_480_80k_pascal_context_59_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_cityscapes.py',
'../_base_/datasets/cityscapes_896x896.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core... | ViT-Adapter-main | segmentation/configs/cityscapes/mask2former_beit_adapter_large_896_80k_cityscapes_ms.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_cityscapes.py',
'../_base_/datasets/mapillary_896x896.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core.... | ViT-Adapter-main | segmentation/configs/cityscapes/mask2former_beit_adapter_large_896_80k_mapillary_ss.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask2former_beit_cityscapes.py',
'../_base_/datasets/cityscapes_896x896.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
crop_size = (896, 896)
# pretrained = 'https://conversationhub.blob.core... | ViT-Adapter-main | segmentation/configs/cityscapes/mask2former_beit_adapter_large_896_80k_cityscapes_ss.py |
import torch
import argparse
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('filename', nargs='?', type=str, default=None)
args = parser.parse_args()
model = torch.load(args.filename, map_location=torch.device('cpu'))
print(model.keys())
state_dict = model['state_dict']
new_state_di... | ViT-Adapter-main | wsdm2023/release.py |
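`release.py` is cut off right after `new_state_di...`. A hedged sketch of how such weight-release scripts typically finish; the filter predicate and output filename below are assumptions, not recovered content:

```python
new_state_dict = {}
for k, v in state_dict.items():
    if not k.startswith('ema_'):  # assumed: drop training-only EMA copies
        new_state_dict[k] = v
torch.save({'state_dict': new_state_dict}, 'released.pth')  # assumed name
```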
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import mmcv_custom # noqa: F401,F403
import mmdet_custom # noqa: F401,F403
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel impo... | ViT-Adapter-main | wsdm2023/test.py |
import pandas as pd
from mmdet.apis import init_detector
import torch
from mmcv.parallel import collate, scatter
from mmdet.datasets import replace_ImageToTensor
from mmdet.datasets.pipelines import Compose
from mmcv.ops import RoIPool
import argparse
import mmcv_custom # noqa: F401,F403
import mmdet_custom # noqa: F... | ViT-Adapter-main | wsdm2023/generate_results.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import mmcv_custom # noqa: F401,F403
import mmdet_custom # noqa: F401,F403
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_d... | ViT-Adapter-main | wsdm2023/train.py |
import cv2
import argparse
import torch
import json
from torchvision.utils import draw_bounding_boxes
from torch.utils.tensorboard import SummaryWriter
def xywh2xyxy(bbox):
x, y, w, h = [int(val) for val in bbox]
return [x, y, x+w, y+h]
def draw_bboxes(img_url, pred, gt, args):
img = cv2.imread(img_url)... | ViT-Adapter-main | wsdm2023/tools/drawbbox.py |
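The `xywh2xyxy` helper above is fully visible: it converts COCO-style `[x, y, w, h]` boxes to the `[x1, y1, x2, y2]` corners that `torchvision.utils.draw_bounding_boxes` expects. A quick check:

```python
# top-left corner stays put; width/height become the bottom-right corner
assert xywh2xyxy([10, 20, 30, 40]) == [10, 20, 40, 60]
```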
import json
import pandas as pd
import datetime
def load_dataset(name):
csv_path = f'data/wsdm2023/annotations/{name}.csv'
dataset = pd.read_csv(csv_path)
# img_path = f'/home/data2/gaoshengyi/datasets/wsdm2023/{name}'
# if not os.path.exists(img_path):
# os.mkdir(img_path)
# for img_url ... | ViT-Adapter-main | wsdm2023/tools/csv2coco.py |
import torch
from mmdet_custom.models.backbones.base.uniperceiver import UnifiedBertEncoder
checkpoint = torch.load("pretrained/uni-perceiver-large-L24-H1024-224size-pretrained.pth", map_location=torch.device('cpu'))
checkpoint = checkpoint['model']
new_checkpoint = {}
for k, v in checkpoint.items():
new_k = k.rep... | ViT-Adapter-main | wsdm2023/tools/convertor.py |
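The replacement rule in `convertor.py` is truncated at `k.rep...`. Conversion scripts of this kind usually just rewrite key prefixes so the upstream Uni-Perceiver names match `UnifiedBertEncoder`; a hedged sketch, with the substitution assumed and the output name taken from the `*_converted.pth` paths used by the configs below:

```python
for k, v in checkpoint.items():
    new_k = k.replace('module.', '')  # assumed prefix rewrite
    new_checkpoint[new_k] = v
torch.save({'model': new_checkpoint},
           'pretrained/uni-perceiver-large-L24-H1024-224size-pretrained_converted.pth')
```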
import torch
from parrot import Parrot
import json
import pandas
import argparse
import warnings
warnings.filterwarnings("ignore")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('csv', type=str, help='csv file path')
parser.add_argument('out', type=str, help='output json file pat... | ViT-Adapter-main | wsdm2023/tools/paraphrase.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
from .models import * # noqa: F401,F403
from .datasets import *  # noqa: F401,F403
from .apis import *  # noqa: F401,F403
| ViT-Adapter-main | wsdm2023/mmdet_custom/__init__.py |
from .pipeline import LoadRefer, TokenizeRefer, RandomParaPhrase, RandomFlipWithRefer
__all__ = ['LoadRefer', 'TokenizeRefer',
'RandomParaPhrase', 'RandomFlipWithRefer']
| ViT-Adapter-main | wsdm2023/mmdet_custom/apis/__init__.py |
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import RandomFlip
from mmdet_custom.models.utils.tokenization import ClipTokenizer
import torch
import json
import numpy as np
@PIPELINES.register_module()
class RandomFlipWithRefer(RandomFlip):
# only allow horizontal flip
def __init_... | ViT-Adapter-main | wsdm2023/mmdet_custom/apis/pipeline.py |
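Once registered through `@PIPELINES.register_module()` as above, these transforms are referenced by `type` name from dataset configs. A sketch of a referring-expression pipeline using the names exported in `apis/__init__.py`; the parameter values are illustrative assumptions:

```python
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadRefer'),
    dict(type='RandomFlipWithRefer', flip_ratio=0.5),  # horizontal-only flip
    dict(type='TokenizeRefer', max_sent_len=77),       # assumed length
]
```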
from .wsdm2023_coco import WSDMCocoDataset
from .vg_dataset import VGDataset
__all__ = ['WSDMCocoDataset', 'VGDataset']
| ViT-Adapter-main | wsdm2023/mmdet_custom/datasets/__init__.py |
import json
import numpy as np
from mmdet.datasets.builder import DATASETS
from mmdet.datasets.custom import CustomDataset
from collections import OrderedDict
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmcv.utils import print_log
@DATASETS.register_module()
class VGDataset(CustomDataset):
... | ViT-Adapter-main | wsdm2023/mmdet_custom/datasets/vg_dataset.py |
# Copyright (c) OpenMMLab. All rights reserved.
import contextlib
import io
import itertools
import json
import logging
import os.path as osp
import tempfile
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from m... | ViT-Adapter-main | wsdm2023/mmdet_custom/datasets/wsdm2023_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .utils import * # noqa: F401,F403
| ViT-Adapter-main | wsdm2023/mmdet_custom/models/__init__.py |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh, multi_apply,
reduce_mean)
from ..utils import build_dn_generator
from mmdet.models.utils.transformer import invers... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/dense_heads/dino_head.py |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Linear, bias_init_with_prob, constant_init
from mmcv.runner import force_fp32
from mmdet.core import multi_apply
from mmdet.models.utils.transformer import inverse_sigmoi... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/dense_heads/deformable_detr_head.py |
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
__all__ = ['DeformableDETRHead', 'DETRHead', 'DINOHead']
| ViT-Adapter-main | wsdm2023/mmdet_custom/models/dense_heads/__init__.py |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Conv2d, Linear, build_activation_layer
from mmcv.cnn.bricks.transformer import FFN, build_positional_encoding
from mmcv.runner import force_fp32
from mmdet.core import (bbox_cxcywh_to... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/dense_heads/detr_head.py |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops import point_sample
def get_uncertainty(mask_pred, labels):
"""Estimate uncertainty based on pred logits.
We estimate uncertainty as L1 distance between 0.0 and the logits
prediction in 'mask_pred' for the foreground class in `cla... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/utils/point_sample.py |
from .query_denoising import build_dn_generator
from .transformer import DinoTransformer, DinoTransformerDecoder
from .point_sample import get_uncertainty, get_uncertain_point_coords_with_randomness
__all__ = ['build_dn_generator', 'DinoTransformer', 'DinoTransformerDecoder',
'get_uncertainty', 'get_uncerta... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/utils/__init__.py |
import math
import torch
import torch.nn as nn
from mmdet.models.utils.builder import TRANSFORMER
from mmcv.cnn.bricks.registry import (TRANSFORMER_LAYER_SEQUENCE,
FEEDFORWARD_NETWORK,
DROPOUT_LAYERS)
from mmdet.models.utils.transformer import ... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/utils/transformer.py |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner import BaseModule
from mmdet.core import bbox_xyxy_to_cxcywh
from mmdet.models.utils.transformer import inverse_sigmoid
class DnQueryGenerator(BaseModule):
def __init__(self,
num_queries,
hidden_dim,
... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/utils/query_denoising.py |
from .builder import build_tokenizer
from .tokenization_clip import ClipTokenizer
| ViT-Adapter-main | wsdm2023/mmdet_custom/models/utils/tokenization/__init__.py |
from .tokenization_clip import MaskClipTokenizer
def build_tokenizer(tokenizer):
    if tokenizer['name'] == 'clip_tokenizer':
        return MaskClipTokenizer(tokenizer['max_sent_len'])
    # fail loudly instead of silently returning None for unknown names
    raise ValueError(f"unsupported tokenizer: {tokenizer['name']}") | ViT-Adapter-main | wsdm2023/mmdet_custom/models/utils/tokenization/builder.py |
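`build_tokenizer` dispatches on `tokenizer['name']`; a usage sketch, where 77 (CLIP's usual context length) is an assumption rather than a value taken from these rows:

```python
tokenizer = build_tokenizer(dict(name='clip_tokenizer', max_sent_len=77))
```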
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
import torch
from torch._C import Value  # unused; likely an accidental IDE auto-import
import pandas as pd
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/utils/tokenization/tokenization_clip.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
from .uniperceiver_adapter import UniPerceiverAdapter
__all__ = ['UniPerceiverAdapter']
| ViT-Adapter-main | wsdm2023/mmdet_custom/models/backbones/__init__.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import BACKBONES
from ops.modules import MSDeformAttn
from timm.models.layers import DropPath, trunc_normal_
from torch.nn.init import normal_
f... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/backbones/uniperceiver_adapter.py |
import logging
from functools import partial
import torch
import torch.nn as nn
from ops.modules import MSDeformAttn
from timm.models.layers import DropPath
import torch.utils.checkpoint as cp
_logger = logging.getLogger(__name__)
def get_reference_points(spatial_shapes, device):
reference_points_list = []
... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/backbones/adapter_modules.py |
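`get_reference_points` is truncated right after the list initialization. In ViT-Adapter this helper normally mirrors the standard Deformable-DETR grid of normalized per-level cell centers; a sketch of that standard version, offered as an assumption about the elided body:

```python
import torch

def get_reference_points(spatial_shapes, device):
    reference_points_list = []
    for lvl, (H, W) in enumerate(spatial_shapes):
        # centers of each cell, normalized to (0, 1) per feature level
        ref_y, ref_x = torch.meshgrid(
            torch.linspace(0.5, H - 0.5, H, dtype=torch.float32, device=device),
            torch.linspace(0.5, W - 0.5, W, dtype=torch.float32, device=device))
        reference_points_list.append(
            torch.stack((ref_x.reshape(-1) / W, ref_y.reshape(-1) / H), -1))
    # (1, sum(H*W), 1, 2) so it broadcasts over batch and attention levels
    return torch.cat(reference_points_list, 0)[None, :, None]
```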
import logging
import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.runner import load_checkpoint
from mmdet.utils import get_root_logger
from timm.models.layers import DropPath
from torch import nn
def window_partition(x, window_size):
"""
Args:
x: (B... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/backbones/base/uniperceiver.py |
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from timm.models.layers import DropPath, Mlp
class GroundingAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False,
attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/backbones/base/grounding_block.py |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import DETECTORS
from mmdet.models.detectors.detr import DETR
from mmdet.core import (bbox2result, bbox_cxcywh_to_xyxy,
bbox_xyxy_to_cxcywh, bbox_flip)
from mmdet.core.bbox.iou_calculators import BboxOverlaps2D
import torc... | ViT-Adapter-main | wsdm2023/mmdet_custom/models/detectors/grounding_dino.py |
from .grounding_dino import GroundingDINO
__all__ = ['GroundingDINO']
| ViT-Adapter-main | wsdm2023/mmdet_custom/models/detectors/__init__.py |
# Copyright (c) ByteDance, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Mostly copy-paste from BEiT library:
https://github.com/microsoft/unilm/blob/master/beit/semantic_segmentation/mmcv_cus... | ViT-Adapter-main | wsdm2023/mmcv_custom/layer_decay_optimizer_constructor.py |
# Copyright (c) Open-MMLab. All rights reserved.
import io
import math
import os
import os.path as osp
import pkgutil
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory
import mmcv
import numpy as np
import torch
import torchvisio... | ViT-Adapter-main | wsdm2023/mmcv_custom/checkpoint.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
from .checkpoint import load_checkpoint
from .customized_text import CustomizedTextLoggerHook
from .layer_decay_optimizer_constructor import LayerDecayOptimizerConstructor
__all__ = [
'LayerDecayOptimizerConstructor', 'CustomizedTextLoggerHook',
'load_check... | ViT-Adapter-main | wsdm2023/mmcv_custom/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
from collections import OrderedDict
import torch
from mmcv.runner import HOOKS, TextLoggerHook
@HOOKS.... | ViT-Adapter-main | wsdm2023/mmcv_custom/customized_text.py |
_base_ = [
'_base_/datasets/wsdm2023_trainval.py',
'_base_/default_runtime.py'
]
# https://github.com/czczup/ViT-Adapter/releases/download/wsdm2023/dino_4scale_uniperceiver_adapter_large_6ep_gqa.pth
load_from = 'pretrained/dino_4scale_uniperceiver_adapter_large_6ep_gqa.pth'
model = dict(
type='GroundingDINO... | ViT-Adapter-main | wsdm2023/configs/dino_4scale_uniperceiver_adapter_large_24ep_gqa_wsdm2023_trainval.py |
_base_ = [
'_base_/datasets/wsdm2023.py',
'_base_/default_runtime.py'
]
# https://github.com/czczup/ViT-Adapter/releases/download/wsdm2023/dino_4scale_uniperceiver_adapter_base_6ep_gqa.pth
load_from = 'pretrained/dino_4scale_uniperceiver_adapter_base_6ep_gqa.pth'
model = dict(
type='GroundingDINO',
with... | ViT-Adapter-main | wsdm2023/configs/dino_4scale_uniperceiver_adapter_base_24ep_gqa_wsdm2023.py |
_base_ = [
'_base_/datasets/grounding_gqa.py',
'_base_/default_runtime.py'
]
# https://github.com/czczup/ViT-Adapter/releases/download/wsdm2023/uni-perceiver-base-L12-H768-224size-torch-pretrained_converted.pth
pretrained = 'pretrained/uni-perceiver-base-L12-H768-224size-torch-pretrained_converted.pth'
model = ... | ViT-Adapter-main | wsdm2023/configs/dino_4scale_uniperceiver_adapter_base_6ep_gqa.py |
_base_ = [
'_base_/datasets/grounding_gqa.py',
'_base_/default_runtime.py'
]
# https://github.com/czczup/ViT-Adapter/releases/download/wsdm2023/uni-perceiver-large-L24-H1024-224size-pretrained_converted.pth
pretrained = 'pretrained/uni-perceiver-large-L24-H1024-224size-pretrained_converted.pth'
model = dict(
... | ViT-Adapter-main | wsdm2023/configs/dino_4scale_uniperceiver_adapter_large_6ep_gqa.py |
_base_ = [
'_base_/datasets/wsdm2023.py',
'_base_/default_runtime.py'
]
# https://github.com/czczup/ViT-Adapter/releases/download/wsdm2023/dino_4scale_uniperceiver_adapter_large_6ep_gqa.pth
load_from = 'pretrained/dino_4scale_uniperceiver_adapter_large_6ep_gqa.pth'
model = dict(
type='GroundingDINO',
wi... | ViT-Adapter-main | wsdm2023/configs/dino_4scale_uniperceiver_adapter_large_24ep_gqa_wsdm2023.py |
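The WSDM2023 configs above use two distinct initialization knobs: the 6-epoch GQA configs set `pretrained` (backbone-only weights from the converted Uni-Perceiver checkpoints), while the 24-epoch fine-tuning configs set `load_from` (full-detector weights produced by that GQA stage). In sketch form:

```python
# stage 1: grounding pre-training on GQA, backbone initialized only
pretrained = 'pretrained/uni-perceiver-large-L24-H1024-224size-pretrained_converted.pth'
# stage 2: WSDM2023 fine-tuning, whole detector resumed from stage 1
load_from = 'pretrained/dino_4scale_uniperceiver_adapter_large_6ep_gqa.pth'
```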
# Copyright (c) OpenMMLab. All rights reserved.
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(b... | ViT-Adapter-main | wsdm2023/configs/_base_/default_runtime.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'WIDERFaceDataset'
data_root = 'data/WIDERFace/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/wider_face.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'WSDMCocoDataset'
data_root = 'data/wsdm2023/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/wsdm2023.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', ... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/cityscapes_instance.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=Tr... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/coco_detection.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'WSDMCocoDataset'
data_root = 'data/wsdm2023/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/wsdm2023_trainval.py |
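These `_base_` dataset rows all define the same ImageNet `img_norm_cfg` (RGB mean/std) and then truncate inside `train_pipeline`. The normalization is consumed later in the pipeline by a `Normalize` step, following the usual MMDetection pattern; a sketch of that pattern, not a recovery of the elided steps in these particular files:

```python
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Normalize', **img_norm_cfg),  # applies the stats defined above
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
```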
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
data = dict(samples_per_gpu=2,
workers_per_gpu=2,
train=dict(_delete_=True,
type='ClassBalancedDataset',
... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/lvis_v0.5_instance.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'VGDataset'
data_root = 'data/grounding_gqa/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/grounding_gqa.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(samples_per_gpu=2,
workers_per_gpu=2,
train=dict(_delete_=True,
type='ClassBalancedDataset',
... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/lvis_v1_instance.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', ... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/cityscapes_detection.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=Tr... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/coco_instance.py |