| repo | file | code | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type |
|---|---|---|---|---|---|---|
fitclip | fitclip-main/aligner/encoder/videoclip_video_text_encoder.py | import os
from typing import Iterable, Iterator, Optional
import torch
from overrides import overrides
from torchvision import transforms as T
from transformers import AutoTokenizer
from aligner.data.frame_sampler import ConsecutiveFrameSampler, FrameSampler
from aligner.encoder.s3dg import S3DG
from aligner.encoder.... | 4,268 | 39.657143 | 120 | py |
fitclip | fitclip-main/aligner/encoder/mil_nce_video_text_encoder.py | import re
from typing import Any, Iterable, Iterator, Mapping, Optional, Union
import numpy as np
import torch
from cached_path import cached_path
from overrides import overrides
from torch import nn
from torchvision import transforms as T
from aligner.data.frame_sampler import ConsecutiveFrameSampler, FrameSampler
f... | 8,365 | 41.040201 | 120 | py |
fitclip | fitclip-main/aligner/encoder/video_text_encoder.py | from abc import abstractmethod
from typing import Callable, Iterable, Iterator, Mapping, Tuple
import torch
from overrides import overrides
from aligner.encoder.video_encoder import TYPE_VIDEO_INPUT, VideoEncoder
TYPE_TEXT_INPUT = Mapping[str, torch.Tensor]
TYPE_OUTPUT = Tuple[torch.Tensor, torch.Tensor]
TYPE_TOKEN... | 994 | 30.09375 | 85 | py |
fitclip | fitclip-main/aligner/encoder/video_encoder.py | from abc import abstractmethod
from typing import Callable, Optional, Tuple
import torch
from overrides import overrides
from torch import nn
from aligner.data.frame_sampler import FrameSampler
TYPE_VIDEO_INPUT = torch.Tensor
TYPE_TRANSFORM = Callable[[torch.Tensor], torch.Tensor]
class VideoEncoder(nn.Module):
... | 2,121 | 32.15625 | 107 | py |
fitclip | fitclip-main/aligner/encoder/slip.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# Copied from https://github.com/facebookresearch/SLIP/tree/c6faf5d
import gzip
import html
from collections import OrderedDict
from functools import lru_cache
from typing import Iterable, Iterator
import ftfy
import numpy as np
import regex ... | 22,773 | 34.640063 | 120 | py |
fitclip | fitclip-main/aligner/encoder/s3dg.py | # Initially copied from the MIL-NCE repo.
"""Contains the definition for Gated Separable 3D network (S3D-G). """
from typing import Literal, Tuple
import torch
from overrides import overrides
from torch import nn
from torch.nn.common_types import _size_3_t, _size_6_t
class InceptionBlock(nn.Module):
def __init__... | 9,814 | 43.817352 | 118 | py |
fitclip | fitclip-main/aligner/encoder/frozen_in_time.py | # Originally from https://github.com/m-bain/frozen-in-time/blob/ba54e43/model/model.py
import logging
import sys
from typing import Any, Dict, Literal, Mapping, MutableMapping, Optional, Tuple, Union
import numpy as np
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from cached_path impo... | 9,799 | 49.515464 | 119 | py |
fitclip | fitclip-main/aligner/encoder/frozen_in_time_video_text_encoder.py | import os
from typing import Iterable, Iterator
import torch
from overrides import overrides
from torchvision import transforms as T
from transformers import AutoTokenizer
from aligner.data.frame_sampler import FrameSampler, RandomFromUniformIntervalsFrameSampler, UniformFrameSampler
from aligner.encoder.frozen_in_ti... | 3,686 | 37.810526 | 118 | py |
fitclip | fitclip-main/aligner/encoder/videoclip.py | import torch
import torch.utils.checkpoint
from torch import nn
from transformers import AutoConfig, BertModel, BertPreTrainedModel
from transformers.activations import ACT2FN
from transformers.models.bert.modeling_bert import BertEmbeddings, BertEncoder
class VideoTokenMLP(nn.Module):
def __init__(self, config):... | 28,028 | 38.256303 | 119 | py |
fitclip | fitclip-main/aligner/encoder/video_transformer.py | # From https://github.com/m-bain/frozen-in-time/blob/ba54e43/model/video_transformer.py
"""
Implementations of Video Transformers in PyTorch
A PyTorch implementation of space-time transformer as described in
'Frozen in Time: A Joint Image and Video Encoder for End-to-End Retrieval' - https://arxiv.org/abs/2104.00650
... | 14,253 | 40.800587 | 120 | py |
fitclip | fitclip-main/aligner/encoder/clip_video_text_encoder.py | import os.path
import shutil
import tempfile
from typing import Iterable, Iterator, Tuple
import torch
from cached_path import cached_path
from clip import clip
from clip.model import CLIP
from overrides import overrides
from torch import nn
from torchvision import transforms as T
from aligner.data.frame_sampler impo... | 6,022 | 39.972789 | 120 | py |
fitclip | fitclip-main/aligner/encoder/slip_video_text_encoder.py | from typing import Iterable, Iterator, Union
import torch
from cached_path import cached_path
from overrides import overrides
from torchvision import transforms as T
from aligner.data.frame_sampler import FrameSampler, UniformFrameSampler
from aligner.encoder import slip
from aligner.encoder.slip import CLIP, SLIP, S... | 3,736 | 36.37 | 115 | py |
fitclip | fitclip-main/aligner/data/youcook2.py | import os
from glob import iglob
from typing import Optional, Tuple
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoTextDataModule
from aligner.data.video_text_dataset import VideoTextDatase... | 2,183 | 41 | 116 | py |
fitclip | fitclip-main/aligner/data/moments_in_time.py | import functools
import os
from typing import Mapping, Tuple
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoClassificationDataModule
from aligner.data.video_dataset import VideoDataset
from... | 2,916 | 43.19697 | 116 | py |
fitclip | fitclip-main/aligner/data/frame_sampler.py | import itertools
from abc import ABC, abstractmethod
from typing import Optional, Sequence
import torch
from overrides import overrides
from util.iter_utils import pairwise
from util.video_utils import resample
class FrameSampler(ABC):
"""Returns the frame indices to seek for the given clip start and end frame ... | 3,069 | 38.87013 | 109 | py |
fitclip | fitclip-main/aligner/data/video_reader.py | import logging
from abc import ABC, abstractmethod
from typing import Sequence, Union
import PIL
import decord
import numpy as np
import torch
import torchvision.datasets
import torchvision.transforms.functional
from overrides import overrides
from util.typing_utils import TYPE_PATH
LOGGER = logging.getLogger(__name... | 4,118 | 33.90678 | 120 | py |
fitclip | fitclip-main/aligner/data/data_module_group.py | import bisect
from abc import ABC
from typing import Any, Callable, Iterable, Mapping, Optional, Sequence, Union
import pytorch_lightning as pl
from overrides import overrides
from pytorch_lightning import Trainer
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.apply_func imp... | 10,134 | 47.492823 | 120 | py |
fitclip | fitclip-main/aligner/data/multi_source_sampler.py | import itertools
import math
import sys
from typing import Generic, Iterable, Iterator, Literal, TypeVar, Union
from torch.utils.data import Sampler
T_co = TypeVar("T_co", covariant=True)
# We don't use `CycleIterator` from PyTorch Lightning because when used along with `itertools.islice`,
# it always creates a new... | 4,191 | 38.92381 | 119 | py |
fitclip | fitclip-main/aligner/data/video_data_module.py | import multiprocessing
from abc import ABC, abstractmethod
from typing import Any, Iterable, Mapping, MutableMapping, Optional, Union
import pytorch_lightning as pl
import torch.cuda
from overrides import overrides
from pytorch_lightning.utilities.apply_func import apply_to_collection
from torch.utils.data import Data... | 4,101 | 44.577778 | 114 | py |
fitclip | fitclip-main/aligner/data/video_text_dataset.py | from abc import ABC
from typing import Mapping, Union
from torch.utils.data.dataloader import default_collate
from aligner.data.tokenizer_collate import MappingTokenizerCollate
from aligner.data.video_dataset import VideoDataset
from aligner.encoder.video_text_encoder import TYPE_TOKENIZER
class VideoTextDataset(Vi... | 744 | 42.823529 | 117 | py |
fitclip | fitclip-main/aligner/data/hmdb.py | import functools
import glob
import os
from typing import Iterable, Literal, Mapping, Optional, Tuple
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.ucf import UCF_101_TEMPLATES
from aligner.data.video_data_module import VideoClassificatio... | 3,441 | 39.023256 | 116 | py |
fitclip | fitclip-main/aligner/data/webvid.py | import os
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoTextDataModule
from aligner.data.video_dataset import VideoDataset
from aligner.data.video_text_dataset import VideoTextDataset
from... | 3,814 | 49.197368 | 120 | py |
fitclip | fitclip-main/aligner/data/video_dataset.py | import collections.abc
import functools
import logging
import os
from abc import ABC, abstractmethod
from typing import Any, Generic, Iterable, Mapping, Optional, Sequence, Tuple, TypeVar, Union
import torch
from overrides import overrides
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Datase... | 5,261 | 43.59322 | 120 | py |
fitclip | fitclip-main/aligner/data/tokenizer_collate.py | import collections.abc
from abc import ABC, abstractmethod
from typing import Any, Callable, Iterable, Mapping, Sequence, Tuple, Union
from overrides import overrides
from pytorch_lightning.utilities.apply_func import apply_to_collection
from torch.utils.data.dataloader import default_collate
from aligner.encoder.vid... | 4,865 | 53.066667 | 120 | py |
fitclip | fitclip-main/aligner/data/kinetics.py | import os
from typing import Iterable, Mapping, Optional, Tuple
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoClassificationDataModule
from aligner.data.video_dataset import VideoDataset
f... | 6,115 | 49.131148 | 116 | py |
fitclip | fitclip-main/aligner/data/msrvtt.py | import json
import os
import random
from typing import Literal
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoTextDataModule
from aligner.data.video_dataset import VideoDataset
from aligner... | 3,743 | 45.8 | 119 | py |
fitclip | fitclip-main/aligner/data/didemo.py | import json
import os
from collections import defaultdict
from cached_path import CACHE_DIR, _find_latest_cached, cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoTextDataModule
from aligner.data.video_text_dataset import VideoTextData... | 3,269 | 47.088235 | 119 | py |
fitclip | fitclip-main/aligner/data/conceptual_captions.py | import functools
import os
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from torchvision.datasets.folder import IMG_EXTENSIONS
from aligner.data.video_data_module import VideoTextDataModule
from aligner.data.video_dataset import VideoD... | 3,241 | 48.121212 | 116 | py |
fitclip | fitclip-main/aligner/data/ucf.py | import functools
import os
import re
from typing import Iterable, Mapping, Optional, Tuple
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoClassificationDataModule
from aligner.data.video_dataset import VideoDa... | 5,400 | 40.229008 | 116 | py |
VQ-Diffusion | VQ-Diffusion-main/inference_VQ_Diffusion.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import torch
import cv2
import ... | 9,903 | 48.029703 | 239 | py |
VQ-Diffusion | VQ-Diffusion-main/train.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import argparse
import os
import warnings
import time
import torch
from image_synthesis.modeling.build import build_m... | 6,809 | 39.058824 | 138 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/distributed/launch.py | import os
import torch
from torch import distributed as dist
from torch import multiprocessing as mp
# import distributed as dist_fn
import image_synthesis.distributed.distributed as dist_fn
def find_free_port():
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
... | 2,604 | 26.712766 | 101 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/distributed/distributed.py | import math
import pickle
import torch
from torch import distributed as dist
from torch.utils import data
LOCAL_PROCESS_GROUP = None
def is_primary():
return get_rank() == 0
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.g... | 3,169 | 20.418919 | 76 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/lr_scheduler.py | import torch
import math
# from torch.optim import AdamW, Adam
from torch._six import inf
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler, CosineAnnealingLR
class ReduceLROnPlateauWithWarmup(object):
"""Reduce learning rate when a metric has stopped improving.
Mo... | 11,992 | 40.071918 | 128 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/clip_grad_norm.py | from torch.nn.utils import clip_grad_norm_
class ClipGradNorm(object):
def __init__(self,
start_iteration=0,
end_iteration=-1, # if negative, the norm will be always clipped
max_norm=0.5):
self.start_iteration = start_iteration
self.end_iteratio... | 935 | 29.193548 | 81 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/logger.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import sys
import torch
from image_synthesis.utils.io import write_args, save_config_to_yaml
from image_synthesis.distributed.distributed import is_primary
import torch.utils.tensorboard a... | 3,005 | 32.4 | 132 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/solver.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import os
import time
import math
import torch
import threading
import multiprocessing
import copy
from PIL import Im... | 26,443 | 47.08 | 204 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/ema.py | import torch
import copy
class EMA(object):
def __init__(self,
model,
decay=0.99,
update_interval=1,
device=torch.device('cpu')):
self.decay = decay
self.update_iterval = update_interval
self.device = device
se... | 2,968 | 42.028986 | 127 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/utils/misc.py | import importlib
import random
import numpy as np
import torch
import warnings
import os
def seed_everything(seed, cudnn_deterministic=False):
"""
Function that sets seed for pseudo-random number generators in:
pytorch, numpy, python.random
Args:
seed: the integer value seed for global ra... | 5,066 | 29.160714 | 119 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/utils/io.py | import sys
import yaml
import torch
import json
def load_yaml_config(path):
with open(path) as f:
config = yaml.full_load(f)
return config
def save_config_to_yaml(config, path):
assert path.endswith('.yaml')
with open(path, 'w') as f:
f.write(yaml.dump(config))
f.close()
def ... | 1,067 | 28.666667 | 98 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/cub200_dataset.py | from torch.utils.data import Dataset
import numpy as np
import io
from PIL import Image
import os
import json
import random
from image_synthesis.utils.misc import instantiate_from_config
from tqdm import tqdm
import pickle
def load_img(filepath):
img = Image.open(filepath).convert('RGB')
return img
class Cub2... | 2,040 | 33.016667 | 129 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/mscoco_dataset.py | from torch.utils.data import Dataset
import numpy as np
import io
from PIL import Image
import os
import json
import random
from image_synthesis.utils.misc import instantiate_from_config
def load_img(filepath):
img = Image.open(filepath).convert('RGB')
return img
class CocoDataset(Dataset):
def __init__(s... | 1,873 | 36.48 | 129 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/imagenet_dataset.py | from torch.utils.data import Dataset
import numpy as np
import io
from PIL import Image
import os
import json
import random
from image_synthesis.utils.misc import instantiate_from_config
def load_img(filepath):
img = Image.open(filepath).convert('RGB')
return img
class ImageNetDataset(Dataset):
def __init... | 2,016 | 33.775862 | 132 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/ffhq_dataset.py | from torch.utils.data import Dataset
import numpy as np
import io
from PIL import Image
import os
import json
import random
from image_synthesis.utils.misc import instantiate_from_config
import torchvision.datasets as datasets
class FFHQDataset(datasets.ImageFolder):
def __init__(self, data_root, im_preprocessor_c... | 848 | 31.653846 | 86 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/build.py | import torch
# from image_synthesis.data.base_dataset import ConcatDatasetWithIndex as ConcatDataset
from torch.utils.data import ConcatDataset
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.distributed.distributed import is_distributed
def build_dataloader(config, args=None, retur... | 3,454 | 44.460526 | 100 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/utils/image_preprocessor.py | import albumentations
import random
import numpy as np
from PIL import Image
import cv2
from io import BytesIO
from torchvision import transforms as trans
class DalleTransformerPreprocessor(object):
def __init__(self,
size=256,
phase='train',
additional_targets=N... | 3,890 | 35.364486 | 140 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/utils/comm.py | """
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import torch
import torch.distributed as dist
# from diffdist.functional import all_gather as better_all_gather
class Comm(object):
def __init__(self, local_rank=0):
self.loca... | 6,860 | 28.701299 | 103 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/utils/manage.py | from sys import stdout
import zipfile
import os.path as osp
import lmdb
import logging
from PIL import Image
import pickle
import io
import glob
import os
from pathlib import Path
import time
from threading import Thread
from queue import Queue,Empty
import subprocess
def func_wrapper(func):
def sub_func(queue,kwa... | 11,184 | 25.630952 | 122 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/modules/clip/clip.py | import hashlib
import os
import urllib
import warnings
from typing import Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
... | 7,962 | 35.360731 | 142 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/modules/clip/model.py | from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is ... | 17,333 | 38.848276 | 178 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/base_codec.py | import torch
from torch import nn
class BaseCodec(nn.Module):
def get_tokens(self, x, **kwargs):
"""
Input:
x: input data
Return:
indices: B x L, the codebook indices, where L is the length
of flattened feature map size
"""
... | 1,046 | 23.348837 | 72 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/image_codec/taming_gumbel_vqvae.py | import torch
import torch.nn as nn
from omegaconf import OmegaConf
import sys
sys.path.append("..")
# sys.path.append("../image_synthesis")
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.taming.models.vqgan import GumbelVQ, VQModel
from image_synthesis.taming.models.cond_transformer... | 10,011 | 33.885017 | 112 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/image_codec/patch_vqgan.py | from numpy.core.shape_base import block
from numpy.lib import stride_tricks
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import random
from torch.nn.modules.linear import Linear
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.modeling.codecs.... | 35,439 | 38.116998 | 147 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/image_codec/ema_vqvae.py | import torch
import torch.nn as nn
from omegaconf import OmegaConf
import sys
sys.path.append("..")
# sys.path.append("../image_synthesis")
import os
import torchvision.transforms.functional as TF
import PIL
from image_synthesis.modeling.codecs.base_codec import BaseCodec
from einops import rearrange
import math
import... | 4,083 | 29.706767 | 112 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/text_codec/tokenize.py | import torch
import torch.nn as nn
from image_synthesis.modeling.modules.clip.clip import tokenize
from image_synthesis.modeling.codecs.base_codec import BaseCodec
from image_synthesis.utils.misc import instantiate_from_config
class Tokenize(BaseCodec):
def __init__(self, context_length:int = 256,
... | 3,124 | 36.202381 | 104 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/models/conditional_dalle.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import torch
import math
from torch import nn
from image_synthesis.utils.misc import instantiate_from_config
import t... | 11,968 | 40.559028 | 154 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/models/unconditional_dalle.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import torch
import math
from torch import nn
from image_synthesis.utils.misc import instantiate_from_config
import t... | 8,216 | 35.52 | 138 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/models/dalle.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import torch
import math
from torch import nn
from image_synthesis.utils.misc import instantiate_from_config
import t... | 14,512 | 43.246951 | 154 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/class_embedding.py | import torch
import torch.nn as nn
from .base_embedding import BaseEmbedding
class ClassEmbedding(BaseEmbedding):
def __init__(self,
num_embed=1000,
embed_dim=512,
identity=False,
trainable=True,
):
super().__init__()
self... | 899 | 26.272727 | 74 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/dalle_mask_image_embedding.py | import torch
import torch.nn as nn
from .base_embedding import BaseEmbedding
class DalleMaskImageEmbedding(BaseEmbedding):
def __init__(self,
num_embed=8192,
spatial_size=[32, 32], # height and with
embed_dim=3968,
trainable=True,
... | 2,507 | 42.241379 | 173 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/base_embedding.py | import torch
from torch import nn
class BaseEmbedding(nn.Module):
def get_loss(self):
return None
def forward(self, **kwargs):
raise NotImplementedError
def train(self, mode=True):
self.training = mode
if self.trainable and mode:
super().train()
retur... | 507 | 19.32 | 49 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/clip_text_embedding.py | import torch
import torch.nn as nn
from image_synthesis.modeling.modules.clip import clip
from image_synthesis.modeling.modules.clip import model as clip_model
from .base_embedding import BaseEmbedding
class CLIPTextEmbedding(BaseEmbedding):
def __init__(self,
clip_name='ViT-B/32',
... | 3,423 | 37.47191 | 121 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/utils/misc.py | from numpy.core.fromnumeric import resize
from numpy.lib.function_base import kaiser
from numpy.lib.npyio import save
import torch
import random
import math
from image_synthesis.distributed.distributed import all_reduce, get_world_size
def logits_top_k(logits, filter_ratio = 0.5, minimum=1, pad_value=None):
logits... | 5,282 | 32.01875 | 114 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/transformers/diffusion_transformer.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import math
import torch
from torch import nn
import torch.nn.functional as F
from image_synthesis.utils.misc import... | 29,919 | 42.678832 | 166 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/transformers/transformer_utils.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import math
import torch
from torch import nn
import torch.nn.functional as F
from image_synthesis.utils.misc import... | 30,407 | 41 | 131 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/util.py | import torch
import torch.nn as nn
def count_params(model):
total_params = sum(p.numel() for p in model.parameters())
return total_params
class ActNorm(nn.Module):
def __init__(self, num_features, logdet=False, affine=True,
allow_reverse_init=False):
assert affine
super(... | 3,847 | 28.374046 | 85 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/vqvae/quantize.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch import einsum
from einops import rearrange
class VectorQuantizer(nn.Module):
"""
see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py
_____________________... | 13,259 | 39.181818 | 110 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/discriminator/model.py | import functools
import torch.nn as nn
from image_synthesis.taming.modules.util import ActNorm
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight... | 2,566 | 36.75 | 116 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/misc/coord.py | import torch
class CoordStage(object):
def __init__(self, n_embed, down_factor):
self.n_embed = n_embed
self.down_factor = down_factor
def eval(self):
return self
def encode(self, c):
"""fake vqmodel interface"""
assert 0.0 <= c.min() and c.max() <= 1.0
b,c... | 904 | 27.28125 | 79 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/diffusionmodules/model.py | # pytorch_diffusion + derived encoder decoder
import math
import torch
import torch.nn as nn
import numpy as np
def get_timestep_embedding(timesteps, embedding_dim):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This mat... | 30,221 | 37.895753 | 121 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/transformer/mingpt.py | """
taken from: https://github.com/karpathy/minGPT/
GPT model:
- the initial stem consists of a combination of token encoding and a positional encoding
- the meat of it is a uniform sequence of Transformer blocks
- each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block... | 15,743 | 40.10705 | 140 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/transformer/permuter.py | import torch
import torch.nn as nn
import numpy as np
class AbstractPermuter(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, reverse=False):
raise NotImplementedError
class Identity(AbstractPermuter):
def __init__(self):
super().__init__()... | 7,093 | 27.48996 | 83 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/losses/lpips.py | """Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""
import torch
import torch.nn as nn
from torchvision import models
from collections import namedtuple
from image_synthesis.taming.util import get_ckpt_path
class LPIPS(nn.Module):
# Learned perceptual metric
def __... | 4,778 | 38.172131 | 104 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/losses/segmentation.py | import torch.nn as nn
import torch.nn.functional as F
class BCELoss(nn.Module):
def forward(self, prediction, target):
loss = F.binary_cross_entropy_with_logits(prediction,target)
return loss, {}
class BCELossWithQuant(nn.Module):
def __init__(self, codebook_weight=1.):
super().__ini... | 816 | 34.521739 | 82 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/losses/vqperceptual.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from image_synthesis.taming.modules.losses.lpips import LPIPS
from image_synthesis.taming.modules.discriminator.model import NLayerDiscriminator, weights_init
class DummyLoss(nn.Module):
def __init__(self):
super().__init__()
def adopt_... | 6,211 | 44.343066 | 113 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/models/vqgan.py | import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.taming.modules.diffusionmodules.model import Encoder, Decoder
from image_synthesis.taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
fr... | 10,554 | 39.28626 | 120 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/models/cond_transformer.py | import os, math
import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.taming.modules.util import SOSProvider
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure trai... | 15,049 | 42.75 | 127 | py |
Reflect | Reflect-master/distill/distill_util.py | import tensorflow as tf
from tf2_models.metrics import distill_loss, sequence_distill_loss
@tf.function(experimental_relax_shapes=True)
def get_topk_mask(inputs, k):
inputs_shape = tf.shape(inputs)
inputs_shape = tf.cast(inputs_shape, dtype=tf.int64)
values, indices = tf.nn.top_k(inputs, k=k, sorted=False)
i... | 3,653 | 35.54 | 110 | py |
Reflect | Reflect-master/distill/distiller.py | import tensorflow as tf
import os
from distill.distill_util import get_distill_scheduler
from tf2_models.train_utils import ExponentialDecayWithWarmpUp
from tf2_models.trainer import OPTIMIZER_DIC
import numpy as np
class Distiller(object):
''' Pipeline for offline distillation.
'''
def __init__(self, hparams,... | 9,284 | 44.292683 | 132 | py |
Reflect | Reflect-master/tf2_models/embedding.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list
class SharedEmbeddings(tf.keras.layers.Layer):
"""Construct shared token embeddings.
"""
def __init__(self, vocab_size, hidden_size, initializer_range=None, regularizer=None, **kwargs):
super(SharedEmbeddings, self)._... | 2,633 | 38.313433 | 137 | py |
Reflect | Reflect-master/tf2_models/lm_transformer.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list
from tf2_models.embedding import SharedEmbeddings
from tf2_models.transformer_layers import Block
from tf2_models.transformers import *
class LmGPT2(tf.keras.Model):
def __init__(self, hparams, scope='lm_gpt2', *inputs, **kwargs... | 10,814 | 39.965909 | 109 | py |
Reflect | Reflect-master/tf2_models/ff.py | import tensorflow as tf
import numpy as np
class VanillaFF(tf.keras.models.Sequential):
def __init__(self, hparams, scope="cl_vff", *inputs, **kwargs):
if 'cl_token' in kwargs:
del kwargs['cl_token']
super(VanillaFF, self).__init__()
self.scope = scope
self.hparams = hparams
self.model_n... | 3,116 | 36.107143 | 92 | py |
Reflect | Reflect-master/tf2_models/common_layers.py | import tensorflow as tf
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import nest
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform ac... | 3,398 | 34.041237 | 72 | py |
Reflect | Reflect-master/tf2_models/lm_lstm.py | import absl
import tensorflow as tf
import numpy as np
from tensorboard.compat.tensorflow_stub import tensor_shape
from tensorflow.python.util import nest
from tf2_models.common_layers import get_initializer
from tf2_models.embedding import SharedEmbeddings
from tf2_models.utils import create_init_var
class LmLSTM(tf... | 23,117 | 47.364017 | 138 | py |
Reflect | Reflect-master/tf2_models/transformers.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list
from tf2_models.embedding import SharedEmbeddings
from tf2_models.transformer_layers import Block
class GPT2(tf.keras.layers.Layer):
def __init__(self, hparams, *inputs, **kwargs):
super(GPT2, self).__init__(hparams, *input... | 16,938 | 40.619165 | 113 | py |
Reflect | Reflect-master/tf2_models/resnet.py | import tensorflow as tf
class ResnetBlock(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size, activation='relu',*inputs, **kwargs):
super(ResnetBlock, self).__init__(*inputs, **kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.activation = activation
self.regularizer... | 6,572 | 40.601266 | 94 | py |
Reflect | Reflect-master/tf2_models/cnn.py | import tensorflow as tf
import numpy as np
def max_out(inputs, num_units, axis=None):
shape = inputs.get_shape().as_list()
if shape[0] is None:
shape[0] = -1
if axis is None: # Assume that channel is the last dimension
axis = -1
num_channels = shape[axis]
if num_channels % num_units:
raise Valu... | 5,878 | 38.993197 | 91 | py |
Reflect | Reflect-master/tf2_models/utils.py | import tensorflow as tf
import re
from tensorboard.compat.tensorflow_stub import tensor_shape
def camel2snake(name):
return name[0].lower() + re.sub(r'(?!^)[A-Z]', lambda x: '_' + x.group(0).lower(), name[1:])
def log_summary(log_value, log_name, summary_scope):
"""Produce scalar summaries."""
with tf.compat.... | 884 | 31.777778 | 99 | py |
Reflect | Reflect-master/tf2_models/train_utils.py | import absl
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2.learning_rate_schedule import LearningRateSchedule
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
from tensorflow_addons.utils import keras_uti... | 17,416 | 40.568019 | 92 | py |
Reflect | Reflect-master/tf2_models/transformer_layers.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list, gelu
class Attention(tf.keras.layers.Layer):
def __init__(self, hidden_dim, n_ctx, config, regularizer, casual_masking=True, scale=False, **kwargs):
super(Attention, self).__init__(**kwargs)
self.output_attentions = c... | 6,560 | 35.049451 | 105 | py |
Reflect | Reflect-master/tf2_models/ff_resnet.py | import tensorflow as tf
class FFResnetBlock(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size, activation='relu',*inputs, **kwargs):
super(FFResnetBlock, self).__init__(*inputs, **kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.activation = activation
self.regular... | 6,118 | 39.256579 | 96 | py |
Reflect | Reflect-master/tf2_models/keras_callbacks.py | import tensorflow as tf
from tf2_models.utils import log_summary
class CheckpointCallback(tf.keras.callbacks.Callback):
def __init__(self, manager, ckpt):
super(CheckpointCallback, self).__init__()
self.manager = manager
self.ckpt = ckpt
def on_epoch_end(self, epoch, logs=None):
self.ckpt.step.... | 1,859 | 38.574468 | 148 | py |
Reflect | Reflect-master/tf2_models/metrics.py | import tensorflow as tf
@tf.function(experimental_relax_shapes=True)
def distill_loss(y_true, y_pred, tmp):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.float32)
scale_factor = 1.0 / (tmp*tmp)
return tf.reduce_mean(tf.compat.v2.nn.softmax_cross_entropy_with_logits(logits=y_pred / tmp,
... | 10,276 | 46.578704 | 117 | py |
Reflect | Reflect-master/tf2_models/trainer.py | import tensorflow as tf
import os
from tf2_models.keras_callbacks import CheckpointCallback, SummaryCallback
from tf2_models.train_utils import RectifiedAdam, ExponentialDecayWithWarmpUp
OPTIMIZER_DIC = {'adam': tf.keras.optimizers.Adam,
'radam': RectifiedAdam,
}
class Trainer(object)... | 3,931 | 39.536082 | 122 | py |
Reflect | Reflect-master/tfds_data/tal_agreement.py | from collections import Counter
import tensorflow as tf
import tensorflow_datasets as tfds
import os
import numpy as np
from tensorflow_datasets.core.features.text import Tokenizer
from tensorflow_datasets.core.features.text.text_encoder import write_lines_to_file, read_lines_from_file
from prep_data.build_dictionary... | 8,680 | 35.020747 | 106 | py |
Reflect | Reflect-master/tasks/sv_agreement.py | import functools
from distill.distill_util import DistillLoss, get_probs, SequenceDistillLoss, get_topk_masked_probs, get_masked_probs
from tasks.task import Task
import tensorflow as tf
from tf2_models import metrics
from tf2_models.metrics import masked_batch_perplexity, masked_perplexity, \
MaskedSequenceLoss, C... | 5,424 | 44.208333 | 163 | py |
Reflect | Reflect-master/tasks/mnist.py | from distill.distill_util import DistillLoss, get_probs
from tasks.task import Task
import tensorflow as tf
import tensorflow_datasets as tfds
from tf2_models.metrics import ClassificationLoss
from tfds_data.aff_nist import AffNist
class Mnist(Task):
def __init__(self, task_params, name='mnist', data_dir='mnist_da... | 8,663 | 37.678571 | 103 | py |
PyKrige | PyKrige-main/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyKrige documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 1 18:34:53 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# au... | 11,242 | 30.940341 | 88 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/misc/__init__.py | import random
import numpy as np
import torch
def set_seed(seed: int):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
| 149 | 12.636364 | 27 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/prior/mlp_pytorch.py | import tempfile
import uuid
from pathlib import Path
from typing import Optional, Tuple
from sklearn.preprocessing import StandardScaler
from constants import num_gradient_updates
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader, TensorDatase... | 6,805 | 36.191257 | 126 | py |
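
For reference, the numeric columns are simple per-file text statistics. Below is a minimal sketch of how they could be computed for a single source file; the helper name `line_stats` is illustrative, and the exact conventions the dataset used (for example, whether newline characters count toward line lengths) are an assumption here:

```python
# Illustrative only: one plausible way to compute the table's numeric
# columns for a single source file. The dataset's exact counting
# conventions are an assumption.
def line_stats(path: str) -> dict:
    with open(path, encoding="utf-8") as f:
        text = f.read()
    lines = text.splitlines() or [""]        # avoid division by zero on empty files
    lengths = [len(line) for line in lines]  # per-line character counts
    return {
        "file_length": len(text),            # total characters in the file
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
    }
```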