| repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension (string, 1 class) |
|---|---|---|---|---|---|---|
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/prior/benchmark.py | import numpy as np
import pandas as pd
from blackbox.load_utils import evaluation_split_from_task, tasks
from optimizer.normalization_transforms import from_string
from prior.mlp_pytorch import ParametricPrior
from prior.mlp_sklearn import ParametricPriorSklearn
normalization = "gaussian"
rows = []
#tasks = [
# '... | 1,360 | 26.22 | 88 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/optimizer_names.py |
class names:
# put names into a class to add structure and avoid having lots of imports
RS = "RS"
# ablation
GP = "GP"
GCP_ho_prior = "GCP + homosk. prior"
GCP = "GCP"
GCP_prior = "GCP + prior (ours)"
GP_prior = "GP + prior"
CTS_ho_prior = "CTS + homosk. prior"
CTS_prior = "CTS... | 2,513 | 27.247191 | 94 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/table2.py | from typing import List, Optional
import pandas as pd
import numpy as np
from pathlib import Path
from blackbox.offline import deepar, fcnet, xgboost, nas102
from experiments.load_results import load_results_paper
from experiments.optimizer_names import names
path = Path(__file__).parent
def adtm_scores(df, optimi... | 3,258 | 30.038095 | 110 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/table2-new-implem.py | import os
import pandas as pd
from pathlib import Path
from experiments.load_results import load_results_paper, load_results_reimplem, add_adtm
from experiments.optimizer_names import names
from experiments.table2 import adtm_scores, rank
path = Path(__file__).parent
if __name__ == '__main__':
df_paper = load_... | 912 | 26.666667 | 88 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/figure1.py | from pathlib import Path
import matplotlib.pyplot as plt
from blackbox.offline import deepar, fcnet, xgboost, nas102
from experiments.load_results import load_results_paper
from experiments.optimizer_names import names
from experiments.optimizer_styles import optimizer_style
path = Path(__file__).parent
def plot_o... | 2,457 | 27.581395 | 105 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/gaussian_process_functional_prior.py | from typing import Optional, Tuple, Callable, Union, List
import logging
import numpy as np
import torch
from gpytorch import ExactMarginalLogLikelihood
from gpytorch.constraints import GreaterThan
from gpytorch.likelihoods import GaussianLikelihood
from torch import Tensor
from torch.distributions import Normal
from ... | 9,128 | 33.711027 | 105 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/thompson_sampling_functional_prior.py | import logging
from typing import Optional, List, Tuple
import numpy as np
from constants import num_gradient_updates
from optimizer import Optimizer
from optimizer.normalization_transforms import from_string
from optimizer.random_search import RS
from prior.mlp_pytorch import ParametricPrior
from prior.mlp_sklearn im... | 2,528 | 35.128571 | 106 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/gaussian_process.py | import logging
from typing import Optional
import numpy as np
import torch
from botorch import fit_gpytorch_model
from botorch.acquisition import ExpectedImprovement
from botorch.models import SingleTaskGP
from botorch.optim import optimize_acqf
from botorch.utils.transforms import normalize
from gpytorch import Exact... | 4,389 | 32.51145 | 109 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/blackbox/offline.py | from pathlib import Path
import pandas as pd
import numpy as np
deepar = 'DeepAR'
fcnet = 'FCNET'
xgboost = 'XGBoost'
nas102 = 'nas_bench102'
metric_error = 'metric_error'
metric_time = 'metric_time'
def evaluations_df(blackbox: str) -> pd.DataFrame:
"""
:returns a dataframe where each row corresponds to on... | 2,136 | 27.878378 | 98 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/blackbox/load_utils.py | import logging
from typing import Tuple, List
import numpy as np
from blackbox.offline import evaluations_df, deepar, fcnet, nas102, xgboost
blackbox_tasks = {
nas102: [
'cifar10',
'cifar100',
'ImageNet16-120'
],
fcnet: [
'naval',
'parkinsons',
'protein',
... | 4,186 | 28.076389 | 107 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/tst/test_prior.py | import numpy as np
from prior.mlp_pytorch import ParametricPrior
num_train_examples = 10000
num_test_examples = num_train_examples
dim = 2
num_gradient_updates = 200
lr = 1e-2
def make_random_X_y(num_examples: int, dim: int, noise_std: float):
X = np.random.rand(num_examples, dim)
noise = np.random.normal(sc... | 1,884 | 29.403226 | 101 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/tst/test_optimization.py | import logging
import random
from functools import partial
import numpy as np
import pytest
import torch
from blackbox import Blackbox, BlackboxOffline
from misc import set_seed
from misc.artificial_data import artificial_task1
from optimizer.gaussian_process import GP
from optimizer.gaussian_process_functional_prior... | 2,572 | 26.967391 | 83 | py |
PC-JeDi | PC-JeDi-main/src/physics.py | # import jetnet
import numpy as np
import pytorch_lightning as pl
import torch as T
# FIX RANDOM SEED FOR REPRODUCIBILITY
pl.seed_everything(0, workers=True)
def locals_to_mass_and_pt(csts: T.Tensor, mask: T.BoolTensor) -> T.Tensor:
"""Calculate the overall jet pt and mass from the constituents. The
constitu... | 2,120 | 30.191176 | 84 | py |
PC-JeDi | PC-JeDi-main/src/torch_utils.py | from typing import Union
import numpy as np
import torch as T
import torch.nn as nn
def get_loss_fn(name: str, **kwargs) -> nn.Module:
"""Return a pytorch loss function given a name."""
if name == "none":
return None
# Regression losses
if name == "huber":
return nn.HuberLoss(reducti... | 918 | 26.848485 | 77 | py |
PC-JeDi | PC-JeDi-main/src/hydra_utils.py | """A collection of miscellaneous functions useful for the lightning/hydra
template."""
import logging
import os
from pathlib import Path
from typing import Any, List, Sequence
import hydra
import rich
import rich.syntax
import rich.tree
import wandb
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning im... | 5,097 | 30.8625 | 86 | py |
PC-JeDi | PC-JeDi-main/src/datamodules/jetnet.py | from copy import deepcopy
from typing import Mapping
import numpy as np
from jetnet.datasets import JetNet
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from src.numpy_utils import log_squash
from src.physics import numpy_locals_to_mass_and_pt
class JetNetData(Da... | 3,490 | 34.989691 | 86 | py |
PC-JeDi | PC-JeDi-main/src/models/diffusion.py | import math
from typing import Optional, Tuple
import torch as T
from tqdm import tqdm
class VPDiffusionSchedule:
def __init__(self, max_sr: float = 1, min_sr: float = 1e-2) -> None:
self.max_sr = max_sr
self.min_sr = min_sr
def __call__(self, time: T.Tensor) -> T.Tensor:
return cosi... | 11,263 | 33.873065 | 86 | py |
PC-JeDi | PC-JeDi-main/src/models/transformers.py | """Some classes to describe transformer architectures."""
import math
from typing import Mapping, Optional, Union
import torch as T
import torch.nn as nn
from torch.nn.functional import dropout, softmax
from .modules import DenseNetwork
def merge_masks(
q_mask: Union[T.BoolTensor, None],
kv_mask: Union[T.B... | 15,049 | 33.837963 | 87 | py |
PC-JeDi | PC-JeDi-main/src/models/schedulers.py | from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class WarmupToConstant(_LRScheduler):
"""Gradually warm-up learning rate in optimizer to a constant value."""
def __init__(self, optimizer: Optimizer, num_steps: int = 100) -> None:
"""
args:
optim... | 793 | 32.083333 | 85 | py |
PC-JeDi | PC-JeDi-main/src/models/modules.py | """Collection of pytorch modules that make up the networks."""
import math
from typing import Optional, Union
import torch as T
import torch.nn as nn
def get_act(name: str) -> nn.Module:
"""Return a pytorch activation function given a name."""
if name == "relu":
return nn.ReLU()
if name == "lrlu... | 20,518 | 35.575758 | 90 | py |
PC-JeDi | PC-JeDi-main/src/models/pc_jedi.py | import copy
from functools import partial
from typing import Mapping, Optional, Tuple
import numpy as np
import pytorch_lightning as pl
import torch as T
import wandb
from jetnet.evaluation import w1efp, w1m, w1p
from src.models.diffusion import VPDiffusionSchedule, run_sampler
from src.models.modules import CosineEn... | 12,805 | 38.403077 | 87 | py |
PC-JeDi | PC-JeDi-main/scripts/train.py | import pyrootutils
root = pyrootutils.setup_root(search_from=__file__, pythonpath=True)
import logging
import hydra
import pytorch_lightning as pl
from omegaconf import DictConfig
from src.hydra_utils import (
instantiate_collection,
log_hyperparameters,
print_config,
reload_original_config,
sav... | 1,731 | 23.742857 | 87 | py |
trees_from_transformers | trees_from_transformers-master/run.py | import argparse
import datetime
import logging
import os
import pickle
from tqdm import tqdm
import torch
from transformers import *
from data.dataset import Dataset
from utils.measure import Measure
from utils.parser import not_coo_parser, parser
from utils.tools import set_seed, select_indices, group_indices
from u... | 11,441 | 45.893443 | 85 | py |
trees_from_transformers | trees_from_transformers-master/utils/score.py | import numpy as np
import torch
from utils.yk import get_stats
class Score(object):
def __init__(self, n):
self.corpus_f1 = torch.zeros(n, 3, dtype=torch.float)
self.sent_f1 = torch.zeros(n, dtype=torch.float)
self.n = n
self.cnt = 0
self.labels = ['SBAR', 'NP', 'VP', 'PP'... | 2,521 | 35.550725 | 79 | py |
trees_from_transformers | trees_from_transformers-master/utils/tools.py | import logging
import random
import torch
specials = {'bert': '#', 'gpt2': 'Ġ', 'xlnet': '▁', 'roberta': 'Ġ'}
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
def select_indices(tokens, raw_tokens, model, mode):
mask = []
raw_i = 0
collapsed = ''
... | 1,612 | 24.603175 | 68 | py |
trees_from_transformers | trees_from_transformers-master/utils/extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Extractor(nn.Module):
def __init__(self, n_hidden):
super(Extractor, self).__init__()
self.linear = nn.Linear(n_hidden * 2, 1)
nn.init.uniform_(self.linear.weight, -0.01, 0.01)
nn.init.uniform_(self.linear.bias... | 752 | 27.961538 | 77 | py |
trees_from_transformers | trees_from_transformers-master/utils/measure.py | import math
import torch
import torch.nn.functional as F
from utils.score import Score
class Measure(object):
def __init__(self, n_layers, n_att):
self.h_measures = ['cos', 'l1', 'l2']
self.a_measures = ['hellinger', 'jsd']
self.a_avg_measures = ['avg_hellinger', 'avg_jsd']
self.m... | 3,102 | 33.477778 | 82 | py |
pi-peps | pi-peps-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup ------------------------------------------------------------... | 5,615 | 28.557895 | 79 | py |
SSTAP | SSTAP-main/main.py | import sys
from dataset import VideoDataSet, VideoDataSet_unlabel
from loss_function import bmn_loss_func, get_mask
import os
import json
import torch
import torch.nn.parallel
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import numpy as np
import opts
from ipdb import set_trace
from... | 42,436 | 48.173812 | 190 | py |
SSTAP | SSTAP-main/dataset.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import torch.utils.data as data
import torch
from utils import ioa_with_anchors, iou_with_anchors
from ipdb import set_trace
def load_json(file):
with open(file) as json_file:
json_data = json.load(json_file)
return json_da... | 14,230 | 51.707407 | 155 | py |
SSTAP | SSTAP-main/loss_function.py | # -*- coding: utf-8 -*-
import torch
import numpy as np
import torch.nn.functional as F
def get_mask(tscale):
bm_mask = []
for idx in range(tscale):
mask_vector = [1 for i in range(tscale - idx)
] + [0 for i in range(idx)]
bm_mask.append(mask_vector)
bm_mask = np.arr... | 3,482 | 32.171429 | 90 | py |
SSTAP | SSTAP-main/models.py | # -*- coding: utf-8 -*-
import math
import numpy as np
import torch
import torch.nn as nn
from ipdb import set_trace
import random
import torch.nn.functional as F
class TemporalShift(nn.Module):
def __init__(self, n_segment=3, n_div=8, inplace=False):
super(TemporalShift, self).__init__()
# self.n... | 13,366 | 43.115512 | 138 | py |
SSTAP | SSTAP-main/data/activitynet_feature_cuhk/ldb_process.py | # -*- coding: utf-8 -*-
"""
Created on Mon May 15 22:31:31 2017
@author: wzmsltw
"""
import caffe
import leveldb
import numpy as np
from caffe.proto import caffe_pb2
import pandas as pd
col_names=[]
for i in range(200):
col_names.append("f"+str(i))
df=pd.read_table("./input_spatial_list.txt",names=['image','fram... | 983 | 21.883721 | 84 | py |
Graph-Unlearning | Graph-Unlearning-main/main.py | import logging
import torch
from exp.exp_graph_partition import ExpGraphPartition
from exp.exp_node_edge_unlearning import ExpNodeEdgeUnlearning
from exp.exp_unlearning import ExpUnlearning
from exp.exp_attack_unlearning import ExpAttackUnlearning
from parameter_parser import parameter_parser
def config_logger(save... | 1,499 | 27.846154 | 131 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/models/sdne.py | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Wang D, Cui P, Zhu W. Structural deep network embedding[C]//Proceedings of the 22nd ACM SIGKDD international conference on Knowledge discovery and data mining. ACM, 2016: 1225-1234.(https://www.kdd.org/kdd2016/papers/file... | 6,214 | 34.514286 | 252 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/models/line.py | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Tang J, Qu M, Wang M, et al. Line: Large-scale information network embedding[C]//Proceedings of the 24th International Conference on World Wide Web. International World Wide Web Conferences Steering Committee, 2015: 1067-... | 7,184 | 32.574766 | 272 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_utils/utils.py | import os
import errno
import numpy as np
import pandas as pd
import networkx as nx
import torch
from scipy.sparse import coo_matrix
from tqdm import tqdm
def graph_reader(path):
"""
Function to read the graph from the path.
:param path: Path to the edge list.
:return graph: NetworkX object returned.... | 4,851 | 27.046243 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/opt_dataset.py | from torch.utils.data import Dataset
class OptDataset(Dataset):
def __init__(self, posteriors, labels):
self.posteriors = posteriors
self.labels = labels
def __getitem__(self, index):
ret_posterior = {}
for shard, post in self.posteriors.items():
ret_posterior[sha... | 448 | 22.631579 | 51 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/optimal_aggregator.py | import copy
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
from torch_geometric.data import Data
from lib_aggregator.opt_dataset import OptDataset
from... | 4,054 | 37.990385 | 120 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/aggregator.py | import logging
import torch
torch.cuda.empty_cache()
from sklearn.metrics import f1_score
import numpy as np
from lib_aggregator.optimal_aggregator import OptimalAggregator
from lib_dataset.data_store import DataStore
class Aggregator:
def __init__(self, run, target_model, data, shard_data, args):
self... | 2,958 | 35.9875 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/metis_partition.py | import numpy as np
import networkx as nx
import pymetis
from torch_geometric.data import ClusterData
from torch_geometric.utils import from_networkx
from lib_graph_partition.partition import Partition
class MetisPartition(Partition):
def __init__(self, args, graph, dataset):
super(MetisPartition, self)._... | 2,609 | 38.545455 | 170 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gnn_base.py | import logging
import pickle
import torch
class GNNBase:
def __init__(self):
self.logger = logging.getLogger('gnn')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.device = torch.device('cpu')
self.model = None
self.embedding_dim = 0
... | 1,482 | 28.078431 | 82 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/node_classifier.py | import logging
import os
import torch
from sklearn.model_selection import train_test_split
torch.cuda.empty_cache()
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.data import NeighborSampler
from torch_geometric.nn.conv.gcn_co... | 7,966 | 38.636816 | 114 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gin/gin.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid, Reddit
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gin.gin_net import GINNet
import config
class GIN(GNNBase):
def __init__(self, num_fea... | 2,338 | 31.943662 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gin/gin_net.py | import torch
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GINConv
class GINNet(torch.nn.Module):
def __init__(self, num_feats, num_classes):
super(GINNet, self).__init__()
dim = 32
nn1 = Sequential(Linear(num_feats, dim), Re... | 1,558 | 30.18 | 74 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gat/gat_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv
class GATNet(torch.nn.Module):
def __init__(self, num_feats, num_classes, dropout=0.6):
super(GATNet, self).__init__()
self.dropout = dropout
self.conv1 = GATConv(num_feats, 8, heads=8, dropout=self.dropou... | 1,074 | 36.068966 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gat/gat.py | import logging
import os
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
import config
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gat.gat_net import GATNet
class GAT(GNNBase):
def __init__(self, num_feats, num_... | 2,273 | 31.028169 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/graphsage/graphsage.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.data import NeighborSampler
from lib_gnn_model.graphsage.graphsage_net import SageNet
from lib_gnn_model.gnn_base import GNNBase
import confi... | 4,883 | 39.363636 | 96 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/graphsage/graphsage_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
class SageNet(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super(SageNet, self).__init__()
self.num_layers = 2
self.convs = torch.nn.ModuleList()
self.convs.a... | 2,154 | 37.482143 | 79 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gcn/gcn_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
class GCNNet(torch.nn.Module):
def __init__(self, num_feats, num_classes):
super(GCNNet, self).__init__()
self.conv1 = GCNConv(num_feats, 16, cached=True, add_self_loops=False)
self.conv2 = GCNConv(16, num... | 781 | 31.583333 | 80 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gcn/gcn.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gcn.gcn_net import GCNNet
import config
class GCN(GNNBase):
def __init__(self, num_feats, num_... | 2,221 | 31.202899 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/mlp/mlp.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.mlp.mlpnet import MLPNet
import config
class MLP(GNNBase):
def __init__(self, num_feats, num_c... | 2,518 | 31.294872 | 107 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/mlp/mlpnet.py | from torch import nn
import torch.nn.functional as F
class MLPNet(nn.Module):
def __init__(self, input_size, num_classes):
super(MLPNet, self).__init__()
self.xent = nn.CrossEntropyLoss()
self.layers = nn.Sequential(
nn.Linear(input_size, 250),
nn.Linear(250, 100),... | 668 | 23.777778 | 50 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_graph_partition.py | import logging
import time
import torch
from sklearn.model_selection import train_test_split
import numpy as np
from torch_geometric.data import Data
import torch_geometric as tg
import networkx as nx
from exp.exp import Exp
from lib_utils.utils import connected_component_subgraphs
from lib_graph_partition.graph_part... | 6,423 | 44.560284 | 155 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_attack_unlearning.py | import logging
import time
from collections import defaultdict
import numpy as np
import torch
import torch_geometric as tg
from torch_geometric.data import Data
from scipy.spatial import distance
import config
from exp.exp import Exp
from lib_graph_partition.graph_partition import GraphPartition
from lib_gnn_model.n... | 13,321 | 48.895131 | 154 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_node_edge_unlearning.py | import logging
import pickle
import time
from collections import defaultdict
import numpy as np
import torch
from torch_geometric.data import Data
import config
from exp.exp import Exp
from lib_gnn_model.graphsage.graphsage import SAGE
from lib_gnn_model.gat.gat import GAT
from lib_gnn_model.gin.gin import GIN
from l... | 7,194 | 42.606061 | 123 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_dataset/data_store.py | import os
import pickle
import logging
import shutil
import numpy as np
import torch
from torch_geometric.datasets import Planetoid, Coauthor
import torch_geometric.transforms as T
import config
class DataStore:
def __init__(self, args):
self.logger = logging.getLogger('data_store')
self.args = ... | 9,583 | 44.421801 | 129 | py |
ZINBAE | ZINBAE-master/ZINBAE.py | """
Implementation of ZINBAE model
"""
from time import time
import numpy as np
from keras.models import Model
import keras.backend as K
from keras.engine.topology import Layer, InputSpec
from keras.layers import Dense, Input, GaussianNoise, Layer, Activation, Lambda, Multiply, BatchNormalization, Reshape, Concatenate... | 10,280 | 39.636364 | 154 | py |
ZINBAE | ZINBAE-master/loss.py | import numpy as np
import tensorflow as tf
from keras import backend as K
def _nan2zero(x):
return tf.where(tf.is_nan(x), tf.zeros_like(x), x)
def _nan2inf(x):
return tf.where(tf.is_nan(x), tf.zeros_like(x)+np.inf, x)
def _nelem(x):
nelem = tf.reduce_sum(tf.cast(~tf.is_nan(x), tf.float32))
return tf... | 4,141 | 30.142857 | 122 | py |
ZINBAE | ZINBAE-master/layers.py | from keras.engine.topology import Layer
from keras.layers import Lambda
from keras import backend as K
import tensorflow as tf
class ConstantDispersionLayer(Layer):
'''
An identity layer which allows us to inject extra parameters
such as dispersion to Keras models
'''
def __init__(... | 1,798 | 32.314815 | 98 | py |
ZINBAE | ZINBAE-master/ZINBAE0.py | """
Implementation of scDeepCluster for scRNA-seq data
"""
from time import time
import numpy as np
from keras.models import Model
import keras.backend as K
from keras.engine.topology import Layer, InputSpec
from keras.layers import Dense, Input, GaussianNoise, Layer, Activation, Lambda, Multiply, BatchNormalization, ... | 8,888 | 39.040541 | 154 | py |
pyterpol | pyterpol-master/docs/conf.py | import os
# -*- coding: utf-8 -*-
#
# Pyterpol documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 26 12:34:08 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated ... | 10,169 | 27.647887 | 80 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/loss_utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
sys.path.append("../../utils")
import tf_utils
def displacement_error(gt, preds, level_loss_coefs, polygon_map, disp_loss_params):
"""
:param g... | 9,420 | 44.73301 | 125 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/imitation/behavior_cloning_tf2.py | import copy
import os
import pickle
import numpy as np
import tensorflow as tf
from ray.rllib.policy import Policy as RllibPolicy
from tensorflow import keras
from tensorflow.compat.v1.keras.backend import get_session, set_session
from human_aware_rl.data_dir import DATA_DIR
from human_aware_rl.human.process_datafram... | 22,092 | 31.925484 | 144 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/ppo/ppo_rllib.py | import numpy as np
import tensorflow as tf
from ray.rllib.models.tf.recurrent_net import RecurrentNetwork
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
class RllibPPOModel(TFModelV2):
"""
Model that will map environment states to action probabilities. Will be shared across agents
"""
def __ini... | 8,450 | 34.508403 | 110 | py |
CBA | CBA-main/vignette.py | # This file demonstrates how to use CBA
"""
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb... | 30,327 | 46.76063 | 185 | py |
CBA | CBA-main/evaluation/evaluation_pancreas.py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import keras as K
import pandas as pd
from keras import layers
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.decomposition import PCA
import scanpy as sc
import scipy
import pickle
from sklearn... | 4,698 | 38.487395 | 118 | py |
CBA | CBA-main/lung/ywb_function.py | import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import... | 7,512 | 33.782407 | 158 | py |
CBA | CBA-main/lung/lung_main.py | """
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics ... | 29,860 | 43.702096 | 177 | py |
CBA | CBA-main/pancreas/ywb_function.py | import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import... | 7,512 | 33.782407 | 158 | py |
CBA | CBA-main/pancreas/pancreas_main.py | """
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics ... | 30,362 | 46.815748 | 185 | py |
CBA | CBA-main/species/ywb_function.py | import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import... | 7,512 | 33.782407 | 158 | py |
CBA | CBA-main/species/species_main.py | """
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics ... | 29,319 | 43.969325 | 198 | py |
ColBERT | ColBERT-master/colbert/parameters.py | import torch
DEVICE = torch.device("cuda")
SAVED_CHECKPOINTS = [32*1000, 100*1000, 150*1000, 200*1000, 300*1000, 400*1000]
SAVED_CHECKPOINTS += [10*1000, 20*1000, 30*1000, 40*1000, 50*1000, 60*1000, 70*1000, 80*1000, 90*1000]
SAVED_CHECKPOINTS += [25*1000, 50*1000, 75*1000]
SAVED_CHECKPOINTS = set(SAVED_CHECKPOINTS)... | 321 | 31.2 | 102 | py |
ColBERT | ColBERT-master/colbert/train.py | import os
import random
import torch
import copy
import colbert.utils.distributed as distributed
from colbert.utils.parser import Arguments
from colbert.utils.runs import Run
from colbert.training.training import train
def main():
parser = Arguments(description='Training ColBERT with <query, positive passage, n... | 929 | 25.571429 | 128 | py |
ColBERT | ColBERT-master/colbert/evaluation/loaders.py | import os
import ujson
import torch
import random
from collections import defaultdict, OrderedDict
from colbert.parameters import DEVICE
from colbert.modeling.colbert import ColBERT
from colbert.utils.utils import print_message, load_checkpoint
from colbert.evaluation.load_model import load_model
from colbert.utils.r... | 6,329 | 31.13198 | 117 | py |
ColBERT | ColBERT-master/colbert/evaluation/load_model.py | import os
import ujson
import torch
import random
from collections import defaultdict, OrderedDict
from colbert.parameters import DEVICE
from colbert.modeling.colbert import ColBERT
from colbert.utils.utils import print_message, load_checkpoint
def load_model(args, do_print=True):
colbert = ColBERT.from_pretrai... | 919 | 30.724138 | 77 | py |
ColBERT | ColBERT-master/colbert/evaluation/ranking.py | import os
import random
import time
import torch
import torch.nn as nn
from itertools import accumulate
from math import ceil
from colbert.utils.runs import Run
from colbert.utils.utils import print_message
from colbert.evaluation.metrics import Metrics
from colbert.evaluation.ranking_logger import RankingLogger
fro... | 2,993 | 32.640449 | 116 | py |
ColBERT | ColBERT-master/colbert/indexing/loaders.py | import os
import torch
import ujson
from math import ceil
from itertools import accumulate
from colbert.utils.utils import print_message
def get_parts(directory):
extension = '.pt'
parts = sorted([int(filename[: -1 * len(extension)]) for filename in os.listdir(directory)
if filename.ends... | 1,064 | 29.428571 | 107 | py |
ColBERT | ColBERT-master/colbert/indexing/faiss.py | import os
import math
import faiss
import torch
import numpy as np
import threading
import queue
from colbert.utils.utils import print_message, grouper
from colbert.indexing.loaders import get_parts
from colbert.indexing.index_manager import load_index_part
from colbert.indexing.faiss_index import FaissIndex
def ge... | 3,899 | 32.333333 | 116 | py |
ColBERT | ColBERT-master/colbert/indexing/index_manager.py | import torch
import faiss
import numpy as np
from colbert.utils.utils import print_message
class IndexManager():
def __init__(self, dim):
self.dim = dim
def save(self, tensor, path_prefix):
torch.save(tensor, path_prefix)
def load_index_part(filename, verbose=True):
part = torch.load(f... | 435 | 17.956522 | 56 | py |
ColBERT | ColBERT-master/colbert/indexing/encoder.py | import os
import time
import torch
import ujson
import numpy as np
import itertools
import threading
import queue
from colbert.modeling.inference import ModelInference
from colbert.evaluation.loaders import load_colbert
from colbert.utils.utils import print_message
from colbert.indexing.index_manager import IndexMan... | 6,247 | 32.234043 | 117 | py |
ColBERT | ColBERT-master/colbert/indexing/faiss_index_gpu.py | """
Heavily based on: https://github.com/facebookresearch/faiss/blob/master/benchs/bench_gpu_1bn.py
"""
import sys
import time
import math
import faiss
import torch
import numpy as np
from colbert.utils.utils import print_message
class FaissIndexGPU():
def __init__(self):
self.ngpu = faiss.get_num_... | 4,108 | 28.561151 | 108 | py |
ColBERT | ColBERT-master/colbert/indexing/faiss_index.py | import sys
import time
import math
import faiss
import torch
import numpy as np
from colbert.indexing.faiss_index_gpu import FaissIndexGPU
from colbert.utils.utils import print_message
class FaissIndex():
def __init__(self, dim, partitions):
self.dim = dim
self.partitions = partitions
s... | 1,605 | 26.220339 | 85 | py |
ColBERT | ColBERT-master/colbert/training/training.py | import os
import random
import time
import torch
import torch.nn as nn
import numpy as np
from transformers import AdamW
from colbert.utils.runs import Run
from colbert.utils.amp import MixedPrecisionManager
from colbert.training.lazy_batcher import LazyBatcher
from colbert.training.eager_batcher import EagerBatcher
... | 4,585 | 35.983871 | 120 | py |
ColBERT | ColBERT-master/colbert/training/utils.py | import os
import torch
from colbert.utils.runs import Run
from colbert.utils.utils import print_message, save_checkpoint
from colbert.parameters import SAVED_CHECKPOINTS
def print_progress(scores):
positive_avg, negative_avg = round(scores[:, 0].mean().item(), 2), round(scores[:, 1].mean().item(), 2)
print("... | 956 | 32 | 107 | py |
ColBERT | ColBERT-master/colbert/utils/logging.py | import os
import sys
import ujson
import mlflow
import traceback
from torch.utils.tensorboard import SummaryWriter
from colbert.utils.utils import print_message, create_directory
class Logger():
def __init__(self, rank, run):
self.rank = rank
self.is_main = self.rank in [-1, 0]
self.run =... | 3,185 | 30.86 | 100 | py |
ColBERT | ColBERT-master/colbert/utils/utils.py | import os
import tqdm
import torch
import datetime
import itertools
from multiprocessing import Pool
from collections import OrderedDict, defaultdict
def print_message(*s, condition=True):
s = ' '.join([str(x) for x in s])
msg = "[{}] {}".format(datetime.datetime.now().strftime("%b %d, %H:%M:%S"), s)
if... | 6,747 | 23.808824 | 91 | py |
ColBERT | ColBERT-master/colbert/utils/distributed.py | import os
import random
import torch
import numpy as np
def init(rank):
nranks = 'WORLD_SIZE' in os.environ and int(os.environ['WORLD_SIZE'])
nranks = max(1, nranks)
is_distributed = nranks > 1
if rank == 0:
print('nranks =', nranks, '\t num_gpus =', torch.cuda.device_count())
if is_dist... | 614 | 22.653846 | 82 | py |
ColBERT | ColBERT-master/colbert/utils/amp.py | import torch
from contextlib import contextmanager
from colbert.utils.utils import NullContextManager
from packaging import version
v = version.parse
PyTorch_over_1_6 = v(torch.__version__) >= v('1.6')
class MixedPrecisionManager():
def __init__(self, activated):
assert (not activated) or PyTorch_over_1... | 1,178 | 28.475 | 94 | py |
ColBERT | ColBERT-master/colbert/ranking/index_part.py | import os
import torch
import ujson
from math import ceil
from itertools import accumulate
from colbert.utils.utils import print_message, dotdict, flatten
from colbert.indexing.loaders import get_parts, load_doclens
from colbert.indexing.index_manager import load_index_part
from colbert.ranking.index_ranker import In... | 2,912 | 34.096386 | 104 | py |
ColBERT | ColBERT-master/colbert/ranking/batch_retrieval.py | import os
import time
import faiss
import random
import torch
from colbert.utils.runs import Run
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, batch
from colbert.ranking.... | 1,819 | 34.686275 | 98 | py |
ColBERT | ColBERT-master/colbert/ranking/batch_reranking.py | import os
import time
import torch
import queue
import threading
from collections import defaultdict
from colbert.utils.runs import Run
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, flatten, zipstar
fro... | 5,139 | 37.939394 | 115 | py |
ColBERT | ColBERT-master/colbert/ranking/index_ranker.py | import os
import math
import torch
import ujson
import traceback
from itertools import accumulate
from colbert.parameters import DEVICE
from colbert.utils.utils import print_message, dotdict, flatten
BSIZE = 1 << 14
class IndexRanker():
def __init__(self, tensor, doclens):
self.tensor = tensor
s... | 5,952 | 35.078788 | 120 | py |
ColBERT | ColBERT-master/colbert/ranking/retrieval.py | import os
import time
import faiss
import random
import torch
import itertools
from colbert.utils.runs import Run
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, batch
from... | 2,000 | 31.274194 | 102 | py |
ColBERT | ColBERT-master/colbert/ranking/reranking.py | import os
import time
import faiss
import random
import torch
from colbert.utils.runs import Run
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, batch
from colbert.ranking.... | 2,042 | 31.951613 | 91 | py |
ColBERT | ColBERT-master/colbert/ranking/faiss_index.py | import os
import time
import faiss
import random
import torch
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.utils.utils import print_message, flatten, batch
from colbert.indexing.loaders import load_doclens
class FaissIndex():
def __init__(self, index_path, ... | 4,820 | 38.195122 | 101 | py |
ColBERT | ColBERT-master/colbert/ranking/rankers.py | import torch
from functools import partial
from colbert.ranking.index_part import IndexPart
from colbert.ranking.faiss_index import FaissIndex
from colbert.utils.utils import flatten, zipstar
class Ranker():
def __init__(self, args, inference, faiss_depth=1024):
self.inference = inference
self.f... | 1,520 | 33.568182 | 122 | py |
ColBERT | ColBERT-master/colbert/modeling/inference.py | import torch
from colbert.modeling.colbert import ColBERT
from colbert.modeling.tokenization import QueryTokenizer, DocTokenizer
from colbert.utils.amp import MixedPrecisionManager
from colbert.parameters import DEVICE
class ModelInference():
def __init__(self, colbert: ColBERT, amp=False):
assert colber... | 3,132 | 34.602273 | 117 | py |
ColBERT | ColBERT-master/colbert/modeling/colbert.py | import string
import torch
import torch.nn as nn
from transformers import BertPreTrainedModel, BertModel, BertTokenizerFast
from colbert.parameters import DEVICE
class ColBERT(BertPreTrainedModel):
def __init__(self, config, query_maxlen, doc_maxlen, mask_punctuation, dim=128, similarity_metric='cosine'):
... | 2,458 | 34.637681 | 112 | py |
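The `file_length`, `avg_line_length`, and `max_line_length` columns appear to be per-file statistics derived from the raw `code` string. Below is a minimal sketch of one plausible derivation; the dataset's exact conventions (character vs. byte counts, newline handling, rounding) are assumptions here, and `row_stats` is a hypothetical helper. Only the column names come from the schema above.

```python
# A minimal sketch, assuming: file_length = total characters, avg_line_length =
# mean characters per line, max_line_length = longest single line. These
# conventions are assumptions; only the column names come from the schema.
def row_stats(repo: str, file: str, code: str, extension: str = "py") -> dict:
    lines = code.split("\n")
    return {
        "repo": repo,
        "file": file,
        "code": code,
        "file_length": len(code),                                    # total characters
        "avg_line_length": sum(len(l) for l in lines) / len(lines),  # mean chars per line
        "max_line_length": max(len(l) for l in lines),               # longest line
        "extension": extension,
    }

# Usage: this two-line snippet yields file_length=37, avg_line_length=18.0,
# max_line_length=24.
print(row_stats("demo-repo", "demo-repo/main.py", "import torch\nprint(torch.__version__)"))
```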