Dataset schema (three string columns per record):
- python_code: the file's source text, 0 to 992k characters; the previews below are truncated (trailing "...") and shown with their original newlines collapsed to spaces
- repo_name: the source repository, 8 to 46 characters
- file_path: the file's path inside the repository, 5 to 162 characters
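Since every record is just these three strings, iterating over an export is straightforward. The sketch below is illustrative only: it assumes the rows are stored as JSON Lines with exactly the column names above, and the file name records.jsonl is a placeholder rather than anything the dataset specifies.

```python
import json

def iter_records(path="records.jsonl"):
    """Yield (python_code, repo_name, file_path) tuples.

    Assumes a JSON Lines export with one object per line carrying the
    three string columns described above; "records.jsonl" is a
    hypothetical file name, not part of the dataset.
    """
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            yield row["python_code"], row["repo_name"], row["file_path"]

if __name__ == "__main__":
    for code, repo, file_path in iter_records():
        print(f"{repo}/{file_path}: {len(code)} characters")
```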
Records (100 rows; repo_name is 'ViT-Adapter-main' for every row):

file_path: segmentation/mmseg_custom/models/plugins/msdeformattn_pixel_decoder.py
python_code: # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import (PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init, normal_init, xavier_init) from mmcv.cnn.bricks.transformer import (build_positional_encoding, ...

file_path: segmentation/mmseg_custom/models/plugins/__init__.py
python_code: # Copyright (c) Shanghai AI Lab. All rights reserved. from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder __all__ = [ 'PixelDecoder', 'TransformerEncoderPixelDecoder', 'MSDeformAttnPixelDecoder' ]

file_path: segmentation/mmseg_custom/models/utils/point_sample.py
python_code: # Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.ops import point_sample def get_uncertainty(mask_pred, labels): """Estimate uncertainty based on pred logits. We estimate uncertainty as L1 distance between 0.0 and the logits prediction in 'mask_pred' for the foreground class in `cla...

file_path: segmentation/mmseg_custom/models/utils/__init__.py
python_code: # Copyright (c) Shanghai AI Lab. All rights reserved. from .assigner import MaskHungarianAssigner from .point_sample import get_uncertain_point_coords_with_randomness from .positional_encoding import (LearnedPositionalEncoding, SinePositionalEncoding) from .transformer import (DetrTran...

file_path: segmentation/mmseg_custom/models/utils/transformer.py
python_code: # Copyright (c) OpenMMLab. All rights reserved. import math import warnings from typing import Sequence import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.cnn import (Linear, build_activation_layer, build_conv_layer, build_norm_layer, ...

file_path: segmentation/mmseg_custom/models/utils/positional_encoding.py
python_code: # Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn from mmcv.cnn.bricks.transformer import POSITIONAL_ENCODING from mmcv.runner import BaseModule @POSITIONAL_ENCODING.register_module() class SinePositionalEncoding(BaseModule): """Position encoding with sine and cosine ...

file_path: segmentation/mmseg_custom/models/utils/assigner.py
python_code: # Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch import torch.nn.functional as F from ..builder import MASK_ASSIGNERS, build_match_cost try: from scipy.optimize import linear_sum_assignment except ImportError: linear_sum_assignment = None class AssignResu...

file_path: segmentation/mmseg_custom/models/backbones/beit_baseline.py
python_code: # -------------------------------------------------------- # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Copyright (c) 2021 Microsoft # Licensed under The MIT License [see LICENSE for details] # By Hangbo Bao # B...

file_path: segmentation/mmseg_custom/models/backbones/__init__.py
python_code: # Copyright (c) Shanghai AI Lab. All rights reserved. from .beit_adapter import BEiTAdapter from .beit_baseline import BEiTBaseline from .vit_adapter import ViTAdapter from .vit_baseline import ViTBaseline from .uniperceiver_adapter import UniPerceiverAdapter __all__ = ['ViTBaseline', 'ViTAdapter', 'BEiTAdapter', ...

file_path: segmentation/mmseg_custom/models/backbones/vit_adapter.py
python_code: # Copyright (c) Shanghai AI Lab. All rights reserved. import logging import math import torch import torch.nn as nn import torch.nn.functional as F from mmseg.models.builder import BACKBONES from ops.modules import MSDeformAttn from timm.models.layers import trunc_normal_ from torch.nn.init import normal_ from .base....

file_path: segmentation/mmseg_custom/models/backbones/beit_adapter.py
python_code: # Copyright (c) Shanghai AI Lab. All rights reserved. import logging import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from mmseg.models.builder import BACKBONES from ops.modules import MSDeformAttn from timm.models.layers import DropPath, trunc_normal_ from t...

file_path: segmentation/mmseg_custom/models/backbones/uniperceiver_adapter.py
python_code: # Copyright (c) Shanghai AI Lab. All rights reserved. import logging import math import torch import torch.nn as nn import torch.nn.functional as F from mmseg.models.builder import BACKBONES from ops.modules import MSDeformAttn from timm.models.layers import DropPath, trunc_normal_ from torch.nn.init import normal_ f...

file_path: segmentation/mmseg_custom/models/backbones/adapter_modules.py
python_code: import logging from functools import partial import torch import torch.nn as nn import torch.utils.checkpoint as cp from ops.modules import MSDeformAttn from timm.models.layers import DropPath _logger = logging.getLogger(__name__) def get_reference_points(spatial_shapes, device): reference_points_list = [] ...

file_path: segmentation/mmseg_custom/models/backbones/vit_baseline.py
python_code: # Copyright (c) Shanghai AI Lab. All rights reserved. import logging import math import torch.nn as nn import torch.nn.functional as F from mmcv.runner import load_checkpoint from mmseg.models.builder import BACKBONES from mmseg.utils import get_root_logger from timm.models.layers import trunc_normal_ from .base.vit ...

file_path: segmentation/mmseg_custom/models/backbones/base/beit.py
python_code: # -------------------------------------------------------- # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Copyright (c) 2021 Microsoft # Licensed under The MIT License [see LICENSE for details] # By Hangbo Bao # B...

file_path: segmentation/mmseg_custom/models/backbones/base/uniperceiver.py
python_code: import logging import math import torch import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.runner import load_checkpoint from mmseg.utils import get_root_logger from timm.models.layers import DropPath from torch import nn def window_partition(x, window_size): """ Args: x: (...

file_path: segmentation/mmseg_custom/models/backbones/base/vit.py
python_code: """Vision Transformer (ViT) in PyTorch. A PyTorch implement of Vision Transformers as described in: 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 `How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` - https:...

file_path: segmentation/mmseg_custom/models/segmentors/encoder_decoder_mask2former.py
python_code: # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmseg.core import add_prefix from mmseg.models import builder from mmseg.models.builder import SEGMENTORS from mmseg.models.segmentors.base import BaseSegmentor from mmseg.ops import resize @SEGMENT...

file_path: segmentation/mmseg_custom/models/segmentors/__init__.py
python_code: # Copyright (c) OpenMMLab. All rights reserved. from .encoder_decoder_mask2former import EncoderDecoderMask2Former from .encoder_decoder_mask2former_aug import EncoderDecoderMask2FormerAug __all__ = ['EncoderDecoderMask2Former', 'EncoderDecoderMask2FormerAug']

file_path: segmentation/mmseg_custom/models/segmentors/encoder_decoder_mask2former_aug.py
python_code: # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmseg.core import add_prefix from mmseg.models import builder from mmseg.models.builder import SEGMENTORS from mmseg.models.segmentors.base import BaseSegmentor from mmseg.ops import resize @SEGMENT...

file_path: segmentation/mmseg_custom/models/decode_heads/maskformer_head.py
python_code: import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d, build_plugin_layer, kaiming_init from mmcv.cnn.bricks.transformer import (build_positional_encoding, build_transformer_layer_sequence) from mmcv.runner import force_fp32 from mmseg.mo...

file_path: segmentation/mmseg_custom/models/decode_heads/__init__.py
python_code: # Copyright (c) OpenMMLab. All rights reserved. from .mask2former_head import Mask2FormerHead from .maskformer_head import MaskFormerHead __all__ = [ 'MaskFormerHead', 'Mask2FormerHead', ]

file_path: segmentation/mmseg_custom/models/decode_heads/mask2former_head.py
python_code: # Copyright (c) OpenMMLab. All rights reserved. import copy import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init from mmcv.cnn.bricks.transformer import (build_positional_encoding, build_transform...

file_path: segmentation/mmcv_custom/layer_decay_optimizer_constructor.py
python_code: # Copyright (c) ByteDance, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. """Mostly copy-paste from BEiT library: https://github.com/microsoft/unilm/blob/master/beit/semantic_segmentation/mmcv_cus...

file_path: segmentation/mmcv_custom/checkpoint.py
python_code: # Copyright (c) Open-MMLab. All rights reserved. import io import math import os import os.path as osp import pkgutil import time import warnings from collections import OrderedDict from importlib import import_module from tempfile import TemporaryDirectory import mmcv import numpy as np import torch import torchvisio...

file_path: segmentation/mmcv_custom/my_checkpoint.py
python_code: import os.path as osp import pkgutil import time from collections import OrderedDict from importlib import import_module import mmcv import torch from torch.utils import model_zoo open_mmlab_model_urls = { 'vgg16_caffe': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/vgg16_caffe-292e1171...

file_path: segmentation/mmcv_custom/__init__.py
python_code: # Copyright (c) Shanghai AI Lab. All rights reserved. from .checkpoint import load_checkpoint from .customized_text import CustomizedTextLoggerHook from .layer_decay_optimizer_constructor import LayerDecayOptimizerConstructor from .my_checkpoint import my_load_checkpoint __all__ = [ 'LayerDecayOptimizerConstructor...

file_path: segmentation/mmcv_custom/customized_text.py
python_code: # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import datetime from collections import OrderedDict import mmcv import torch from mmcv.runner import HOOKS, TextLoggerHo...

file_path: segmentation/configs/_base_/default_runtime.py
python_code: # yapf:disable log_config = dict( interval=50, hooks=[ dict(type='TextLoggerHook', by_epoch=False), # dict(type='TensorboardLoggerHook') ]) # yapf:enable dist_params = dict(backend='nccl') log_level = 'INFO' load_from = None resume_from = None workflow = [('train', 1)] cudnn_benchmark = True...

file_path: segmentation/configs/_base_/datasets/cityscapes.py
python_code: # dataset settings dataset_type = 'CityscapesDataset' data_root = 'data/cityscapes/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (512, 1024) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize',...

file_path: segmentation/configs/_base_/datasets/nyu_depth_v2.py
python_code: # dataset settings dataset_type = 'NYUDepthV2Dataset' data_root = 'data/nyu_depth_v2/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (480, 480) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', reduce_zero_label=Tru...

file_path: segmentation/configs/_base_/datasets/pascal_context.py
python_code: # dataset settings dataset_type = 'PascalContextDataset' data_root = 'data/VOCdevkit/VOC2010/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) img_scale = (520, 520) crop_size = (480, 480) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnno...

file_path: segmentation/configs/_base_/datasets/loveda.py
python_code: # dataset settings dataset_type = 'LoveDADataset' data_root = 'data/loveDA' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (512, 512) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', reduce_zero_label=True), dict(...

file_path: segmentation/configs/_base_/datasets/mapillary_896x896.py
python_code: # dataset settings dataset_type = 'MapillaryDataset' data_root = 'data/Mapillary/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (896, 896) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='MapillaryHa...

file_path: segmentation/configs/_base_/datasets/cityscapes_1024x1024.py
python_code: _base_ = './cityscapes.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (1024, 1024) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), dic...

file_path: segmentation/configs/_base_/datasets/pascal_voc12_aug.py
python_code: _base_ = './pascal_voc12.py' # dataset settings data = dict( train=dict( ann_dir=['SegmentationClass', 'SegmentationClassAug'], split=[ 'ImageSets/Segmentation/train.txt', 'ImageSets/Segmentation/aug.txt' ]))

file_path: segmentation/configs/_base_/datasets/pascal_context_59.py
python_code: # dataset settings dataset_type = 'PascalContextDataset59' data_root = 'data/VOCdevkit/VOC2010/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) img_scale = (520, 520) crop_size = (480, 480) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAn...

file_path: segmentation/configs/_base_/datasets/hrf.py
python_code: # dataset settings dataset_type = 'HRFDataset' data_root = 'data/HRF' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) img_scale = (2336, 3504) crop_size = (256, 256) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type=...

file_path: segmentation/configs/_base_/datasets/coco-stuff164k.py
python_code: # dataset settings dataset_type = 'COCOStuffDataset' data_root = 'data/coco_stuff164k' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (512, 512) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize'...

file_path: segmentation/configs/_base_/datasets/cityscapes_768x768.py
python_code: _base_ = './cityscapes.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (768, 768) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), dict(...

file_path: segmentation/configs/_base_/datasets/coco-stuff10k.py
python_code: # dataset settings dataset_type = 'COCOStuffDataset' data_root = 'data/coco_stuff10k' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (512, 512) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', reduce_zero_label=True),...

file_path: segmentation/configs/_base_/datasets/potsdam.py
python_code: # dataset settings dataset_type = 'PotsdamDataset' data_root = 'data/potsdam' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (512, 512) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', reduce_zero_label=True), dic...

file_path: segmentation/configs/_base_/datasets/cityscapes_896x896.py
python_code: _base_ = './cityscapes.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (896, 896) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), dict(...

file_path: segmentation/configs/_base_/datasets/ade20k.py
python_code: # dataset settings dataset_type = 'ADE20KDataset' data_root = 'data/ade/ADEChallengeData2016' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (512, 512) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', reduce_zero_labe...

file_path: segmentation/configs/_base_/datasets/chase_db1.py
python_code: # dataset settings dataset_type = 'ChaseDB1Dataset' data_root = 'data/CHASE_DB1' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) img_scale = (960, 999) crop_size = (128, 128) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), d...

file_path: segmentation/configs/_base_/datasets/cityscapes_832x832.py
python_code: _base_ = './cityscapes.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (832, 832) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), dict(...

file_path: segmentation/configs/_base_/datasets/cityscapes_769x769.py
python_code: _base_ = './cityscapes.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (769, 769) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), dict(...

file_path: segmentation/configs/_base_/datasets/stare.py
python_code: # dataset settings dataset_type = 'STAREDataset' data_root = 'data/STARE' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) img_scale = (605, 700) crop_size = (128, 128) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(typ...

file_path: segmentation/configs/_base_/datasets/drive.py
python_code: # dataset settings dataset_type = 'DRIVEDataset' data_root = 'data/DRIVE' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) img_scale = (584, 565) crop_size = (64, 64) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type=...

file_path: segmentation/configs/_base_/datasets/pascal_voc12.py
python_code: # dataset settings dataset_type = 'PascalVOCDataset' data_root = 'data/VOCdevkit/VOC2012' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (512, 512) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resi...

file_path: segmentation/configs/_base_/models/fcn_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/pspnet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/upernet_swin.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) backbone_norm_cfg = dict(type='LN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=224, embed_dims=96, patch_size=4, ...

file_path: segmentation/configs/_base_/models/setr_naive.py
python_code: # model settings backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth', backbone=dict( type='VisionTransformer', img_size=(768, 768), ...

file_path: segmentation/configs/_base_/models/nonlocal_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/dpt_vit-b16.py
python_code: norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='pretrain/vit-b16_p16_224-80ecf9dd.pth', # noqa backbone=dict( type='VisionTransformer', img_size=224, embed_dims=768, num_layers=12, num_heads=12, out_indices=(...

file_path: segmentation/configs/_base_/models/mask2former_beit_cocostuff.py
python_code: # model_cfg num_things_classes = 80 num_stuff_classes = 91 num_classes = num_things_classes + num_stuff_classes norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoderMask2Former', pretrained=None, backbone=dict( type='XCiT', patch_size=16, embed_dim=384...

file_path: segmentation/configs/_base_/models/fcn_hr18.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://msra/hrnetv2_w18', backbone=dict( type='HRNet', norm_cfg=norm_cfg, norm_eval=False, extra=dict( stage1=dict( num_modul...

file_path: segmentation/configs/_base_/models/isanet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/encnet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/cgnet.py
python_code: # model settings norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True) model = dict( type='EncoderDecoder', backbone=dict( type='CGNet', norm_cfg=norm_cfg, in_channels=3, num_channels=(32, 64, 128), num_blocks=(3, 21), dilations=(2, 4), reductions=...

file_path: segmentation/configs/_base_/models/mask2former_beit_pascal.py
python_code: # model_cfg num_things_classes = 29 num_stuff_classes = 30 num_classes = num_things_classes + num_stuff_classes norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoderMask2Former', pretrained=None, backbone=dict( type='XCiT', patch_size=16, embed_dim=384...

file_path: segmentation/configs/_base_/models/icnet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', backbone=dict( type='ICNet', backbone_cfg=dict( type='ResNetV1c', in_channels=3, depth=50, num_stages=4, out_indices=(0, 1, 2, 3), ...

file_path: segmentation/configs/_base_/models/mask2former_beit.py
python_code: # model_cfg num_things_classes = 100 num_stuff_classes = 50 num_classes = num_things_classes + num_stuff_classes norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoderMask2Former', pretrained=None, backbone=dict( type='XCiT', patch_size=16, embed_dim=38...

file_path: segmentation/configs/_base_/models/setr_mla.py
python_code: # model settings backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth', backbone=dict( type='VisionTransformer', img_size=(768, 768), ...

file_path: segmentation/configs/_base_/models/ccnet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/gcnet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/deeplabv3_unet_s5-d16.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='UNet', in_channels=3, base_channels=64, num_stages=5, strides=(1, 1, 1, 1, 1), enc_num_convs=(2, 2, 2, 2, 2), ...

file_path: segmentation/configs/_base_/models/dnl_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/upernet_beit.py
python_code: # -------------------------------------------------------- # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Copyright (c) 2021 Microsoft # Licensed under The MIT License [see LICENSE for details] # By Hangbo Bao # B...

file_path: segmentation/configs/_base_/models/mask2former_beit_cityscapes.py
python_code: # model_cfg num_things_classes = 8 num_stuff_classes = 11 num_classes = num_things_classes + num_stuff_classes norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoderMask2Former', pretrained=None, backbone=dict( type='XCiT', patch_size=16, embed_dim=384,...

file_path: segmentation/configs/_base_/models/fast_scnn.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) model = dict( type='EncoderDecoder', backbone=dict( type='FastSCNN', downsample_dw_channels=(32, 48), global_in_channels=64, global_block_channels=(64, 96, 128), global_block_strides=(2, 2,...

file_path: segmentation/configs/_base_/models/upernet_r50.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 1, 1), strides=...

file_path: segmentation/configs/_base_/models/pspnet_unet_s5-d16.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='UNet', in_channels=3, base_channels=64, num_stages=5, strides=(1, 1, 1, 1, 1), enc_num_convs=(2, 2, 2, 2, 2), ...

file_path: segmentation/configs/_base_/models/segformer_mit-b0.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='MixVisionTransformer', in_channels=3, embed_dims=32, num_stages=4, num_layers=[2, 2, 2, 2], num_heads=[1, 2, 5, 8], ...

file_path: segmentation/configs/_base_/models/fcn_unet_s5-d16.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='UNet', in_channels=3, base_channels=64, num_stages=5, strides=(1, 1, 1, 1, 1), enc_num_convs=(2, 2, 2, 2, 2), ...

file_path: segmentation/configs/_base_/models/fastfcn_r50-d32_jpu_psp.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, dilations=(1, 1, 2, 4), strides=(1, 2, 2, 2), out_indices=...

file_path: segmentation/configs/_base_/models/danet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/ocrnet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='CascadeEncoderDecoder', num_stages=2, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1...

file_path: segmentation/configs/_base_/models/maskformer_beit.py
python_code: # model_cfg norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='XCiT', patch_size=16, embed_dim=384, depth=12, num_heads=8, mlp_ratio=4, qkv_bias=True, use_abs_pos_emb=Tr...

file_path: segmentation/configs/_base_/models/pointrend_r50.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='CascadeEncoderDecoder', num_stages=2, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1...

file_path: segmentation/configs/_base_/models/psanet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/bisenetv2.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='BiSeNetV2', detail_channels=(64, 64, 128), semantic_channels=(16, 32, 64, 128), semantic_expansion_ratio=6, bga_channels=128,...

file_path: segmentation/configs/_base_/models/mask2former_beit_potsdam.py
python_code: # model_cfg num_things_classes = 1 num_stuff_classes = 5 num_classes = num_things_classes + num_stuff_classes norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoderMask2Former', pretrained=None, backbone=dict( type='BEiT', patch_size=16, embed_dim=384, ...

file_path: segmentation/configs/_base_/models/ocrnet_hr18.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='CascadeEncoderDecoder', num_stages=2, pretrained='open-mmlab://msra/hrnetv2_w18', backbone=dict( type='HRNet', norm_cfg=norm_cfg, norm_eval=False, extra=dict( stage1=dict( ...

file_path: segmentation/configs/_base_/models/deeplabv3plus_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/twins_pcpvt-s_upernet.py
python_code: # model settings backbone_norm_cfg = dict(type='LN') norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', backbone=dict( type='PCPVT', init_cfg=dict( type='Pretrained', checkpoint='pretrained/pcpvt_small.pth'), in_channels=3, embed_d...

file_path: segmentation/configs/_base_/models/dmnet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/upernet_vit-b16_ln_mln.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='pretrain/jx_vit_base_p16_224-80ecf9dd.pth', backbone=dict( type='VisionTransformer', img_size=(512, 512), patch_size=16, in_channels=3, embed_dims=768,...

file_path: segmentation/configs/_base_/models/lraspp_m-v3-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) model = dict( type='EncoderDecoder', backbone=dict( type='MobileNetV3', arch='large', out_indices=(1, 3, 16), norm_cfg=norm_cfg), decode_head=dict( type='LRASPPHead', in_channels=(1...

file_path: segmentation/configs/_base_/models/deeplabv3_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/fpn_r50.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 1, 1), strides=...

file_path: segmentation/configs/_base_/models/twins_pcpvt-s_fpn.py
python_code: # model settings backbone_norm_cfg = dict(type='LN') norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', backbone=dict( type='PCPVT', init_cfg=dict( type='Pretrained', checkpoint='pretrained/pcpvt_small.pth'), in_channels=3, embed_d...

file_path: segmentation/configs/_base_/models/mask2former_beit_chase_db1.py
python_code: # model_cfg num_things_classes = 0 num_stuff_classes = 2 num_classes = num_things_classes + num_stuff_classes norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoderMask2Former', pretrained=None, backbone=dict( type='BEiT', patch_size=16, embed_dim=384, ...

file_path: segmentation/configs/_base_/models/ann_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/bisenetv1_r18-d32.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', backbone=dict( type='BiSeNetV1', in_channels=3, context_channels=(128, 256, 512), spatial_channels=(64, 64, 64, 128), out_indices=(0, 1, 2), out_channels=256, ...

file_path: segmentation/configs/_base_/models/apcnet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/emanet_r50-d8.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), dilations=(1, 1, 2, 4), strides=...

file_path: segmentation/configs/_base_/models/erfnet_fcn.py
python_code: # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='ERFNet', in_channels=3, enc_downsample_channels=(16, 64, 128), enc_stage_non_bottlenecks=(5, 8), enc_non_bottleneck_dilations...

file_path: segmentation/configs/_base_/models/setr_pup.py
python_code: # model settings backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth', backbone=dict( type='VisionTransformer', img_size=(768, 768), ...
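For a sense of how the file_path column encodes the repository layout, here is a small hedged sketch that tallies records by their leading path components; count_by_directory is a made-up helper, and the two sample rows reuse paths from the listing above with the code field elided.

```python
from collections import Counter

def count_by_directory(rows, depth=3):
    """Tally (python_code, repo_name, file_path) rows by the first
    `depth` components of file_path. Illustrative helper only."""
    counts = Counter()
    for _code, _repo, file_path in rows:
        counts["/".join(file_path.split("/")[:depth])] += 1
    return counts

# Sample rows with paths taken verbatim from the listing; "..." stands
# in for the elided python_code field.
sample = [
    ("...", "ViT-Adapter-main",
     "segmentation/mmseg_custom/models/backbones/vit_adapter.py"),
    ("...", "ViT-Adapter-main",
     "segmentation/configs/_base_/models/upernet_r50.py"),
]
print(count_by_directory(sample))
# -> Counter({'segmentation/mmseg_custom/models': 1,
#             'segmentation/configs/_base_': 1})
```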