content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_renders_df(product_df, order_df, user_df, address_df, num_days=90):
"""
Renders - All requested renders from order, both customer and tester
"""
renders_df = pd.merge(product_df, order_df, how='left', on='order_id', suffixes=(None, '_order'))
renders_df = pd.merge(renders_df, user_df, how='l... | 26051e774a0be83687fa65f0a737cee50b88d55f | 2,800 |
def check_sparsity_level(model, config, ref_sparsity_level):
"""
Check that sparsity level of the model is equal to reference sparse level.
"""
sparsity_algo = MagnitudeSparsity(config, None)
all_weights_nodes = sparsity_algo._get_all_weights_nodes(model)
all_weights = [get_node_value(w_node).fl... | dc2921a56080ea82d39f3e5bcd42f51ef510d969 | 2,801 |
import typing
def new(
name: str,
data: typing.Optional[bytes] = b"",
digest_size: typing.Optional[int] = None,
*,
custom: typing.Optional[bytes] = None, # cshakes, kangarootwelve
key: typing.Optional[bytes] = None, # for blakes
) -> Hash:
"""
Instantiate a hash object.
Args:
... | aecb6a7783f39a25c781d6fb869b3aecac99d4bd | 2,802 |
def stringify_addresses(addresses):
"""
Converts a list of addresses into a string in the
`"John Doe" <john@example.com>, "Jane" <jane@example.com>"` format,
which can be directly used in the headers of an email.
Parameters
----------
addresses : (str or (str, str)) or list of (str or (str,... | 5a970730d39469a7aa66e220fcbd3fb4de28ecc5 | 2,803 |
def validator_map_size(string):
"""
Validator for map size input
Raises InputError with error description if string is not valid
:param string: String to check
:return: Bool, if success
"""
result = False
if string.isdigit():
size = int(string)
if 5 <= size <= 100:
... | bc25845dee1be3c0416a36ea0527d641136f9ac5 | 2,804 |
import requests
def get_short_token(app_id, app_secret, redirect_url, auth_code):
"""Get a short-lived access token."""
url = f"{OAUTH_URL}/access_token"
payload = {
"client_id": app_id,
"client_secret": app_secret,
"grant_type": "authorization_code",
"redirect_uri": redire... | 57d260876a19a9a7f52da66069c34f5223abcf19 | 2,805 |
import random
def random_chinese_name():
"""生成随机中文名字,二到三字
Returns:
str: 随机名字
"""
long = random.randint(2, 3)
first_name = random.choice(FIRST_NAME)
last_name = random.choice(LAST_NAME) if long == 2 else "{}{}".format(random.choice(LAST_NAME),
... | 863bc1a72d0ba28916e61f62c6c6a26da9c34f7a | 2,806 |
def generate_json_with_incorrect_prediction_value(features_definition: dict):
"""
Generates a list of dictonaries with keys from the given features_definitions, key in the dictionary
has a corresponding value not allowed by the given definition
"""
mock_requests = []
def_keys = list(features_def... | a0019822fbc701e8cdda61192bf564d1f72af9dd | 2,807 |
import os
def extract_text(file: UploadFile = File(...), lang: str = "eng", text_only: bool = False, custom_config: str = None):
"""
:param file:
:param lang: available: deu, eng
:return:
"""
filepath = "temp/" + file.filename
with file.file:
with open(filepath, "wb") as temp_file:... | 303eaf64baa591a3bc803f3eff85e405216fcb56 | 2,808 |
from pathlib import Path
def split_data(
args,
data_paths: t.List[Path],
val_ratio: float = 0.20,
test_ratio: float = 0.10,
random_state: int = 42,
) -> (t.List[str], t.List[str], t.List[str]):
"""
Split the data into train, val and test and save the splits to
file.
Args:
args
dat... | 2f9a0c0b3a90ca8a208d0afc91cc9fc8afcfb0ee | 2,809 |
def check_chains(sampler, pos, theta_lb, theta_ub,
mode_list=['bounds']):
""" check chains
1> reset out-of-bound chains
2> reset all chains to max likelihood neighbours
"""
mode_all = ['bounds', 'reset_all']
for mode in mode_list:
assert mode in mode_all
n_walkers... | ce0ccc2b9ab3ace56daf4e9dcfd54a4f845c0ca5 | 2,810 |
def get_index_train_test_path(_DATA_DIRECTORY_PATH, split_num, train = True):
"""
Method to generate the path containing the training/test split for the given
split number (generally from 1 to 20).
@param split_num Split number for which the data has to be generated
@param train ... | 7de884bd63417ad91d83703dc2cebe4f70629315 | 2,811 |
def read_output():
"""Reads the complex values from output file sink generated by gnuradio expt 2"""
complex_output = np.fromfile(file_sink_complex_expt2, dtype = 'complex64').reshape(-1,1)
plt.figure()
plt.plot(complex_output[11:18000].real)
plt.plot(complex_output[11:18000].imag)
plt.... | e472dc0615c548aa3839540aeec7c2a30361bd49 | 2,812 |
def get_wikipedia_pages_by_list(titles_or_page_ids):
"""
Get Wikipedia pages using list of titles or page ids.
@param titles_or_page_ids: List of titles or page ids.
@return: List of pages.
>>> titles_or_page_ids = 'Aromatics_byggnad'
>>> pages = get_wikipedia_pages_by_list(titles_or_page_ids... | 37ebb4747e6ecfbaa3da7cbb8be04619f0297e89 | 2,813 |
import warnings
def probit(s: pd.Series, error: str = "warn") -> pd.Series:
"""
Transforms the Series via the inverse CDF of the Normal distribution.
Each value in the series should be between 0 and 1. Use `error` to
control the behavior if any series entries are outside of (0, 1).
>>> impor... | 0f12c007da3ebc78b5f6cf073b4637dfae360a26 | 2,814 |
import json
def make_poem(token_nums, df, new_rowi):
"""
should return a series to be put at the end of the dataframe
Having a list in a df cell is apparently a pain so words are joined with "_"
"""
print(token_nums)
words = df.iloc[token_nums,0].to_list()
words_out = []
for word in w... | 1f950134e766fa701d5800ac4bcc5d7aca1847e5 | 2,815 |
def merge_multilinestrings(network):
"""Try to merge all multilinestring geometries into linestring geometries.
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
... | 624ffd9f25af378451c087a9c9b11af25f06b7bc | 2,816 |
import os
def _read_yaml_definition(uarchdefs, path):
"""
:param uarchdefs:
:param path:
"""
uarchdef = read_yaml(os.path.join(path, "microarchitecture.yaml"), SCHEMA)
uarchdef["Path"] = path
uarchdefs.append(uarchdef)
_read_uarch_extensions(uarchdefs, path)
baseuarch = read_... | ff0f0a860483dc05b5386de050e16509fa01fc03 | 2,817 |
from datetime import datetime
def timestamp_old ():
""" store timestamp field """
timestamp = {}
timestamp['timestamp'] = False
try:
today = datetime.datetime.now()
# print('Timestamp: {:%Y-%m-%d %H:%M:%S}'.format(today))
timestamp['timestamp'] = "{:%Y-%m-%d %H:%M:%S}".format(tod... | 19638d49ff148d93ab9d6a2b2bebedf59899b71a | 2,818 |
import os
def download_model(model: str, saving_directory: str = None) -> str:
"""
Function that loads pretrained models from AWS.
:param model: Name of the model to be loaded.
:param saving_directory: RELATIVE path to the saving folder (must end with /).
Return:
- Path to model checkpoi... | 7520b78d1913ef275aecdd4f0b3c07ea5a164ef4 | 2,819 |
from datetime import datetime
def get_series(currency_id: str, interval: str) -> pd.DataFrame:
""" Get the time series for the given currency_id. Timestamps and dates are given in UTC time. """
url = f"https://api.coincap.io/v2/assets/{currency_id}/history"
js = request_and_jsonize_calm(url, params={'int... | e2420de9d35c0eb7f5b408bfc520d587c601c5ca | 2,820 |
import html
def formatTitle(title):
    """Format a scraped HTML title: decode entities, truncating long results.

    Titles longer than 40 characters (after unescaping) are cut to 40
    characters and suffixed with an ellipsis.
    """
    decoded = html.unescape(title)
    if len(decoded) <= 40:
        return decoded
    return decoded[:40] + "..."
def isPalindrome(x):
"""
:type x: int
:rtype: bool
"""
def sub_judge(start, end, string):
if start >= end:
return True
if string[start] == string[end]:
return sub_judge(start + 1, end - 1, string)
else:
return False
return sub_judge(0... | c7ecea3934e1cceb6574630eb06703f18f02832a | 2,822 |
def count_partitions(n, m):
"""Count the partitions of n using parts up to size m.
>>> count_partitions(6, 4)
9
>>> count_partitions(10, 10)
42
"""
if n == 0:
return 1
elif n < 0:
return 0
elif m == 0:
return 0
else:
with_m = count_partitions(n-m,... | 941984ffd1912ff66fd6a006ccc2bc58fc41eaa8 | 2,823 |
import re
def parse_pairs(string):
"""
Converts string where are data wrote using such method:
Key: Value
To dictionary where "Key" is key and "Value" is value. If there's newline, space and dot or text - that must be
added to previous value.
:param string: string that contains data to conv... | 95e46b7a6abf8885630f6151b7fde380c6bc0fcf | 2,824 |
def check_yum_package(package_name, logger):
"""
check if a yum package is installed
:param package_name: name to be checked
:param logger: rs log obj
:return: boolean
"""
logger.trace("Checking if package '{}' is installed.", package_name)
command = "yum list installed {}".format(package_name)
try:
execute_... | 777f041d03279fd2a8e8a4dcfa1c8e5df9b42b44 | 2,825 |
def get(filename, name):
    """
    Read a given element from an SVG file.

    Returns the "d" attribute of the first element whose id matches *name*.
    """
    root = etree.parse(filename).getroot()
    matches = root.xpath("//*[@id='%s']" % name)
    return matches[0].get("d")
def softmaxCostAndGradient(predicted, target, outputVectors, dataset):
""" Softmax cost function for word2vec models
Implement the cost and gradients for one predicted word vector
and one target word vector as a building block for word2vec
models, assuming the softmax prediction function and cross
... | a7215d75a58bcc19d2b779769b74bcb96beb8d6c | 2,827 |
def get_osf_meta_schemas():
    """Returns the current contents of all known schema files."""
    # Load every known schema file and normalise its structure.
    return [
        ensure_schema_structure(from_json(filename))
        for filename in OSF_META_SCHEMA_FILES
    ]
def update(request, bleep_id):
"""
Process a bleep form update
"""
if request.method == 'POST':
form = BleepForm(request.POST)
if form.is_valid():
# Process and clean the data
# ...
# update the form with current bleep data
b = Bleep.objec... | eee08d0c589d3ff1c58e1deb66a1d0b10408785e | 2,829 |
import email
import binascii
def decode_header(header):
"""Decode a message header value without converting charset.
Returns a list of (decoded_string, charset) pairs containing each of the
decoded parts of the header. Charset is None for non-encoded parts of the
header, otherwise a lower-case s... | a8cddb9cf196efd4715511418414cc429dd54fe7 | 2,830 |
def user_from_identity():
    """Returns the User model object of the current jwt identity"""
    # The JWT identity is the username stored when the token was issued.
    return User.query.filter(User.username == get_jwt_identity()).scalar()
from typing import Set
from re import T
def combine(first: Set[T], second: Set[T]) -> Set[T]:
"""Combine two sets of tuples, prioritising the second."""
result = second.copy()
for pf in first:
include = True
for pr in result:
if pf[0] == pr[0]:
include = False
... | e28b6884b63c055c1224ffc7d19613581f6cacc8 | 2,832 |
def minsize<VAL1>(event, context):
"""
AutoScalingGroup起動台数調整
"""
""" Create Connection """
try:
client = boto3.client('autoscaling', region_name = '<Region>')
except:
print('Connection Error')
return 1
""" Update AutoScalingGroup """
try:
client.update_... | 8eccb127ec7f1468706b80af83ccd6641460f22a | 2,833 |
def thin(image, n_iter=None):
"""
Perform morphological thinning of a binary image
Parameters
----------
image : binary (M, N) ndarray
The image to be thinned.
n_iter : int, number of iterations, optional
Regardless of the value of this parameter, the thinned image
is r... | d554d56c3a0146fb487d2b3a1f4e8a3f033e3559 | 2,834 |
def rings(xgr):
    """ rings in the graph (minimal basis)
    """
    subgraphs = []
    for bnd_keys in rings_bond_keys(xgr):
        subgraphs.append(bond_induced_subgraph(xgr, bnd_keys))
    subgraphs.sort(key=frozen)
    return tuple(subgraphs)
def prior_search(binary, left_fit, right_fit, margin=50):
"""
searches within the margin of previous left and right fit indices
Parameters:
binary: np.ndarray, binary image from the video
left_fit: list, left line curve fitting coefficients
right_fit: list, right line curve fitting c... | 60a5838fef6a56060471dcfd54ebb4ebb43acbb4 | 2,836 |
import tqdm
def _parallel_iter(par, iterator):
"""
Parallelize a partial function and return results in a list.
:param par: Partial function.
:param iterator: Iterable object.
:rtype: list
:return: List of results.
"""
pool = mp.Pool(processes=mp.cpu_count(), maxtasksperchild=1)
o... | e71a2c91421fc6abd10b48787eec2c4de5ccd03b | 2,837 |
def reassign_labels(class_img, cluster_centers, k=3):
"""Reassigns mask labels of t series
based on magnitude of the cluster centers.
This assumes land will always be less than thin
cloud which will always be less than thick cloud,
in HOT units"""
idx = np.argsort(cluster_centers.sum(axis=1))
... | bff2e9e1e0a9db4b7bd59e8e84ac0689e1947e1f | 2,838 |
import argparse
def parse_args():
"""parse args with argparse
:returns: args
"""
parser = argparse.ArgumentParser(description="Daily Reddit Wallpaper")
parser.add_argument("-s", "--subreddit", type=str, default=config["subreddit"],
help="Example: art, getmotivated, wallpape... | deb48dd5760b4d640a132518f5886d39a7b54b8e | 2,839 |
def gridarray(a, b):
"""
Given two arrays create an array of all possible pairs, a 2d grid.
E.g. a = [1, 2], b = [2, 4, 5], gridarray(a,b) = [[1,2], [1,4],
[1,5], [2,2], [2,4], [2,5]]. May be used repeatedly for increasing
dimensionality.
DEPRECIATED: Use A, B = np.meshgrid(a, b).
... | 5320655c718cd5be0d2321079fb1d77719ac1b39 | 2,840 |
import unicodedata
def has_alphanum(s):
"""
Return True if s has at least one alphanumeric character in any language.
See https://en.wikipedia.org/wiki/Unicode_character_property#General_Category
"""
for c in s:
category = unicodedata.category(c)[0]
if category == 'L' or ca... | 3ac778e5f415bce4fa1e8667a1599ca73367b733 | 2,841 |
import os
def src_path_join(*parts):
    """
    Return the path to a file located under the source directory.

    Behaves like `os.path.join`, rooted at `get_src_dir()`.

    :param parts: path components relative to the source directory
    :return: the joined path
    """
    return os.path.join(get_src_dir(), *parts)
def get_values_heatmap(entity, measurement, case_id, categorical_filter, categorical, numerical_filter_name, from1,
to1, measurement_filter, date, r):
""" Get numerical values from numerical table from database
get_values use in heatmap, clustering
r: connection with database
... | f3eb7b422f2dd41b5dd2e1378a41cc915f02c34e | 2,843 |
from typing import Dict
from typing import List
from typing import Set
from typing import Tuple
from typing import Optional
from typing import Union
from typing import KeysView
import asyncio
from datetime import datetime
async def unwrap_pull_requests(prs_df: pd.DataFrame,
precomputed_... | 9ad6db247a3b3ccf2b6fcf5573195035705613c8 | 2,844 |
import collections
def load_images(image_files, resize=True):
"""Load images from files and optionally resize it."""
images = []
for image_file in image_files:
with file_io.FileIO(image_file, 'r') as ff:
images.append(ff.read())
if resize is False:
return images
# To resize, run a tf session... | e73c05e4718d1a67b6dea07650f9df3406c76daf | 2,845 |
def schemaGraph (ds, ns, ontology_uri=None):
"""
schemaGraph (datasource, namespace, [ontology_uri,])
Return an RDF graph filled with axioms describing the datasource.
@param ds: the DataSource whose schema has to be converted
@param ns: the namespace uri of the created cla... | 7c5f20d6795a06776fe83a77e3f573f6da11ff3e | 2,846 |
def is_remote(path):
    """Determine whether a file is in a remote location (which can be handled) based on prefix of connection string.

    :param path: path or connection string to classify
    :return: True if the path starts with a recognised remote scheme
    """
    # str.startswith accepts a tuple of prefixes, replacing the manual loop.
    return path.startswith(("s3://", "http://", "https://"))
def saliency_map(output, input, name="saliency_map"):
"""
Produce a saliency map as described in the paper:
`Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps
<https://arxiv.org/abs/1312.6034>`_.
The saliency map is the gradient of the max element in outpu... | ae59afa3a3f449ccbee22644644699dd7033bdf0 | 2,848 |
import io
import contextlib
def pretty_tree(*, program: str = None, file: str = None) -> str:
"""Get a pretty-printed string of the parsed AST of the QASM input.
The input will be taken either verbatim from the string ``program``, or read
from the file with name ``file``. Use exactly one of the possible... | f5436af958dd2bff7aaa297daaa27ef817c619a7 | 2,849 |
def get_models(datasets):
    """It obtains the models used into the experiments"""
    # Results are read from the first dataset's CSV; MODEL names repeat per row.
    results_path = '../results/' + datasets[0] + '/results.csv'
    frame = pd.read_csv(results_path, sep=';')
    return frame['MODEL'].unique().tolist()
def _gr_xmin_ ( graph ) :
    """Get x-min for the graph
    >>> xmin = graph.xmin()
    """
    # Number of points in the graph; an empty graph has no x-min.
    _size = len ( graph )
    if 0 == _size : return 0  # BUG FIX: was `_sise` — NameError on empty graphs
    #
    # ROOT.Double values are filled by reference by GetPoint.
    x_ = ROOT.Double(0)
    v_ = ROOT.Double(0)
    graph.GetPoint ( 0 , x_ , v_ )
    #
    return x_
def eliminate(values):
"""
Go through all the boxes, and whenever there is a box with a value, eliminate this value from the values of all its peers.
Input: A sudoku in dictionary form.
Output: The resulting sudoku in dictionary form.
"""
solved_values = [box for box in values.keys() if len(valu... | 5e28acbe0ea7cd528e9e1dc77a411d20bd253a9a | 2,852 |
def route_distance(route):
    """
    Return the total distance travelled for a given tour.

    route -- sequence of nodes visited; the start node is not repeated at
    the end, so the closing leg back to the first node is included.
    """
    total = 0
    # Pair each node with its predecessor, wrapping around to the last node.
    predecessors = [route[-1]] + list(route[:-1])
    for previous, current in zip(predecessors, route):
        total += current.euclidean_dist(previous)
    return total
import torch
def adjust_matrix(matrix):
"""
Sorting matrix cols.
matrix: can be a numpy 2d-array or pytorch 2d-Tensor
Return
------
adjusted pytorch 2d-tensor
"""
if isinstance(matrix, np.ndarray):
tmp = torch.from_numpy(matrix).clone() # ?
else:
tmp = matrix.clon... | b045ef16e8b359ff873176265a4ce3e96a973504 | 2,854 |
def get_student_graph(pool, student, student_friends, friends_students, need_spinglass=False):
"""
Получение социального графа пользователя.
:param pool: пул процессов (библиотека multiprocessing)
:param student: идентификатор пользователя
:param student_friends: список друзей пользователя
:par... | 90adffdeea74b5176b24b1a57e86d52a86b38046 | 2,855 |
import os
def wdirectory(path):
    """
    Change the working directory to the given data path.
    ___
    path: string, data path in the system
    """
    # os.chdir returns None; the return is kept to preserve the interface.
    return os.chdir(path)
def ile_robil_czy_mial_dobe(dzien, zp, grafik):
    """Did the user work a full 24-hour shift ("doba") on the given day?

    Returns a tuple ``(hours_worked, worked_full_day)``.
    """
    wpisy = Wpis.objects.filter(user=zp.user, grafik=grafik, dzien=dzien)
    godzin = sum(wpis.pion.ile_godzin(dzien) for wpis in wpisy)
    return (godzin, godzin == 24)
def load_teacher():
"""
load ready-to-go teacher from "https://towardsdatascience.com/advanced-dqns-playing-pac-man-with-deep-reinforcement-learning-3ffbd99e0814"
:return: a trained teacher model trained with double dueling dqn with prioritized ER
"""
dqn = DQNPacman(input_size=dense_config.input_si... | 1428b77b7387f7b0307558e719da65e275a10abf | 2,858 |
from typing import List
import json
def exception_logged(result_output: str, exc: Exception) -> bool:
"""Small utility to search click result output for a specific excpetion .
Args:
result_output: The click result output string to search.
exc: The exception to search for.
Returns:
... | 2148cabf18c0f7d36311e913160191759ca1ce6b | 2,859 |
def hello(world):
    """Return a friendly greeting addressed to *world*."""
    return f"Hello, {world}!"
def uniform_transition_matrix(p=0.01, N=24):
"""Computes uniform transition matrix
Notebook: C5/C5S3_ChordRec_HMM.ipynb
Args:
p (float): Self transition probability (Default value = 0.01)
N (int): Column and row dimension (Default value = 24)
Returns:
A (np.ndarray): Output tr... | d5f4ea5516de7b9d0d3a6f41ce28921634c2f309 | 2,861 |
def __is_connected__(g):
    """
    Check whether the directed acyclic graph is connected.

    :return: A boolean indicating if the graph is connected.
    """
    # Connectivity is evaluated on the undirected view of the DAG.
    undirected = __convert_to_undirected_graph__(g)
    return nx.is_connected(undirected)
def get_segments(tokens, max_seq_length):
"""Segments: 0 for the first sequence, 1 for the second"""
if len(tokens)>max_seq_length:
raise IndexError("Token length more than max seq length!")
segments = []
current_segment_id = 0
for token in tokens:
segments.append(current_segment_id)... | f3510e04ae44d6d479fe4325dbd257fbcab6cdbc | 2,863 |
import torch
def GRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None, linear_func=None):
""" Copied from torch.nn._functions.rnn and modified """
if linear_func is None:
linear_func = F.linear
if input.is_cuda and linear_func is F.linear and fusedBackend is not None:
gi = linear_func(i... | b21c4efc7b4a2f4caa022d76acfc496444f4e991 | 2,864 |
import glob
def _toggle_debug_mode() -> bool:
"""Set debug to true or false.
Can be used for debugging purposes such that exceptions are raised (including the stack trace)
instead of suppressed.
Note: the debug status is always printed when executing this method.
Returns:
Boolean indica... | 96d95c7b54d2760af3bc37186cdff759f270af9c | 2,865 |
import os
import sys
def GetAppBasename():
    """Returns the friendly basename of this application."""
    # os.path.basename(p) is defined as the tail of os.path.split(p).
    return os.path.split(sys.argv[0])[1]
import subprocess
def connect(dbtype: str, **kwargs) -> subprocess.Popen:
""" Creates a connection to the database server """
# create subprocess
process = subprocess.Popen('/bin/bash', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=10)
# connect process t... | 65f3bc4470a8daa5a8442df9e0ce239c1939f153 | 2,867 |
def client(mock_settings) -> StructurizrClient:
    """Provide a client instance with the mock settings."""
    # Test fixture: wires a StructurizrClient to the mocked settings object
    # so tests never touch real workspace credentials.
    return StructurizrClient(settings=mock_settings)
def mean_iou(y_true, y_pred, **kwargs):
    """
    Compute mean Intersection over Union of two segmentation masks, via Keras.
    Calls seg_metrics(y_true, y_pred, metric_name='iou'), see there for allowed kwargs.
    """
    # drop_last=False: include the final class channel in the metric.
    return seg_metrics(y_true, y_pred, metric_name='iou', drop_last = False, **kwargs)
from copy import deepcopy
from futile.Utils import function_signature_regenerator as fsr
def get_python_function(target_kwargs_function,func_name,func_spec):
"""Convert a argparse spec into a python function
This function provides a python function with a signature indicated by the ``fun_spec`` dictionary
... | 6f25fd3d59bac5d0827345c36a348e8cac7350ac | 2,870 |
def ydbdr2rgb(ydbdr, *, channel_axis=-1):
"""YDbDr to RGB color space conversion.
Parameters
----------
ydbdr : (..., 3, ...) array_like
The image in YDbDr format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which a... | 9646d75712fc607cace24eb6189228ea2d5308f1 | 2,871 |
import typing
def execute(
connection_info: NodeConnectionInfo,
block_id: typing.Union[None, bytes, str, int] = None
) -> dict:
"""Returns current auction system contract information.
:param connection_info: Information required to connect to a node.
:param block_id: Identifier of a finalised... | 9c318c3a1b63b9b30033290c56ac96a589c46104 | 2,872 |
def direction_to_point(pos1: IntVector2D, pos2: IntVector2D) -> Grid4TransitionsEnum:
"""
Returns the closest direction orientation of position 2 relative to position 1
:param pos1: position we are interested in
:param pos2: position we want to know it is facing
:return: direction NESW as int N:0 E:... | d13246d64b79050b19189d77047c1390d3d40448 | 2,873 |
def handle_nullboolean(field, request_get):
"""Build a list of chips for NullBooleanField field."""
value = yesno(
field.value(),
pgettext_lazy('Possible values of boolean filter', 'yes,no,all'))
return [{
'content': CHIPS_PATTERN % (field.label, value),
'link': get_cancel_ur... | 0544ffb0f4054fe6c6447b811fb8a8b8dbf0ca46 | 2,874 |
def rob(nums):
"""
:type nums: List[int]
:rtype: int
"""
if nums == [] or len(nums) == 0:
return 0
elif len(nums) == 1:
return nums[0]
runningTotal = [-1, -1]
runningTotal[0] = nums[0]
runningTotal[1] = max(nums[0], nums[1])
for i in range(2, len(nums)):
... | e58e4d04cbe490b9bd2957d23c5dfd42e92aa0fb | 2,875 |
def data_coded_table(request, project_pk):
"""This returns the labeled data.
Args:
request: The POST request
project_pk: Primary key of the project
Returns:
data: a list of data information
"""
project = Project.objects.get(pk=project_pk)
data_objs = DataLabel.objects.f... | b6b98a85a80986c6ca045f79e9e478b798e81d4e | 2,876 |
def when(name, converters=None):
"""When step decorator.
:param name: Step name.
:param converters: Optional `dict` of the argument or parameter converters in form
{<param_name>: <converter function>}.
:param parser: name of the step parser to use
:param parser_args: optional... | cbcca0184ba8951e60e8324addef0497888956ec | 2,877 |
def displacement(current: np.ndarray, previous: np.ndarray) -> np.array:
"""Computes the displacement vector between the centroids of two storms.
:param current: the intensity-weighted centroid of the storm in the current time slice, given as a tuple.
:param previous: the intensity-weighted centroid of the ... | 551ee24a92c2709f0af630d8fab726648da5d026 | 2,878 |
def update_datapackage(datapackage, mappings):
"""Update the field names and delete the `maps_to` properties."""
for i, resource in enumerate(datapackage['resources']):
fields = []
for field in resource['schema']['fields']:
fiscal_key = mappings[i][field['name']]
if fi... | f56cf5917331a55d2ac0d5783e0b9c3962eccb5f | 2,879 |
import pybel
def get_molpro_mol(logfile):
    """
    Return the first molecule parsed from a Molpro logfile.

    :param logfile: path to the Molpro output file ('mpo' format)
    :return: a pybel Molecule object
    """
    # BUG FIX: generator.next() is Python-2-only; Python 3 requires the
    # next() builtin on the generator returned by pybel.readfile.
    return next(pybel.readfile('mpo', logfile))
def tbody(content, accesskey:str ="", class_: str ="", contenteditable: str ="",
data_key: str="", data_value: str="", dir_: str="", draggable: str="",
hidden: str="", id_: str="", lang: str="", spellcheck: str="",
style: str="", tabindex: str="", title: str="", transl... | 2e47b4e4d995b4100ee9fcb2f408f8f1816e768e | 2,881 |
import json
from sys import path
def create_controller():
"""
1. Check the token
2. Call the worker method
3. Show results
"""
minimum_buffer_min = 3
token_ok = views.ds_token_ok(minimum_buffer_min)
if token_ok and 'envelope_id' in session:
# 2. Call the worker method
a... | 5a037ce622c42e11143856625934089a01ea7909 | 2,882 |
import six
def pack(number, word_size = None, endianness = None, sign = None, **kwargs):
"""pack(number, word_size = None, endianness = None, sign = None, **kwargs) -> str
Packs arbitrary-sized integer.
Word-size, endianness and signedness is done according to context.
`word_size` can be any positi... | e7d6a356f56e9e9c05e20af91a03c9fff2638773 | 2,883 |
def getSourceUrls(db):
"""获取未被爬取的文献来源链接"""
sql = """
SELECT DISTINCT
re_article_source.url_source
FROM
re_article_source
LEFT JOIN source ON re_article_source.url_source = source.url
WHERE
source.url IS NULL
... | edc84e224b76ff84ffef5f12845add6680ccb25d | 2,884 |
def ML_bump(x,v=None,logger=None):
"""
ML fit of the bump function
Parameters
----------
x : (n,d) ndarray
coML estimatearaites
v : (n,) ndarray
weight for each sample
Returns
-------
mu : (n,d) ndarray
bump mean parameter (for each dimension)
sigma : (... | 2ac410b6a756d97df1dbbd42b571a38835efb5d0 | 2,885 |
import argparse
def parse_arguments(args_to_parse):
""" Parse the command line arguments.
"""
description = "Find targets which contain a None reference"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'-d', '--directory-to-search', type=str, required=True,
... | 6f5b849356baf3bece4731918240d7ed5e692bb8 | 2,886 |
def function_f1a(x):
    """Render a single value in its default string form.

    :type x: int
    :rtype: str
    """
    return f'{x}'
from typing import Union
import socket
def is_port_in_use(hostname: str, port: Union[int, str]) -> bool:
"""
Check if TCP/IP `port` on `hostname` is in use
"""
with socket() as sock:
try:
sock.bind((hostname, int(port)))
return False
except OSError as err:
... | 958e9dced4f5b3850f1b2f66f42e8ce21b7d3548 | 2,888 |
def _pos_from_before_after(
before: int, after: int, length: int, base0: bool
) -> int:
"""Get the position to insert from before and after"""
if before is not None and after is not None:
raise ValueError("Can't specify both `_before` and `_after`.")
if before is None and after is None:
... | 8a3fe871c144b00d6bcb4f1286726124f48302de | 2,889 |
import copy
def prep_incorporation_correction_filing(session, business, original_filing_id, payment_id, option,
name_change_with_new_nr):
"""Return a new incorporation correction filing prepped for email notification."""
filing_template = copy.deepcopy(CORRECTION_INCOR... | 604667c22087304e6f1ffd4e9a51596722952f9e | 2,890 |
import logging
def get_logger():
"""
Return the custom showyourwork logger.
Sets up the logging if needed.
"""
logger = logging.getLogger("showyourwork")
# Add showyourwork stream & file handlers
if not logger.handlers:
# Root level
logger.setLevel(logging.DEBUG)
... | 051e263422e84d10fa99fa9627a7bff9e5cc9f0b | 2,891 |
import random
def web_videos_random_archived(channel):
"""Play random archived video.
Chooses random archived video from selected channel and redirects to its
detail page view.
Args:
channel (str): YouTube channel ID.
Returns:
flask.Response: Selected video detail view.
... | 6d05832fb4529f3c17b6f6dbdc8c900642cdcbdf | 2,892 |
def fundamental_mode_mfd_marcuse(wl, r, na):
"""Calculates the mode field diameter of the fundamental mode with vacuum wavelength wl using Marcuse's equation.
:param wl: Wavelength of the mode
:type wl: float
:param r: Core radius
:type r: float
:param na: Core numerical aperture
:type na: ... | 570d680e5c23b5e5fb5d3528a6bd1fc9d6c55168 | 2,893 |
def generate_ansible_coverage_config(): # type: () -> str
"""Generate code coverage configuration for Ansible tests."""
coverage_config = '''
[run]
branch = True
concurrency = multiprocessing
parallel = True
omit =
*/python*/dist-packages/*
*/python*/site-packages/*
*/python*/distutils/*
*/pys... | 88fa630613ff12cb5fd33f90883393ee21b574fa | 2,894 |
def gauss_smooth_shift(input, shift, stddev, scale=1.0):
"""
smooths the input with gaussian smooothing with standarddeviation and shifts its delay positions
:param input: The input array
:param shift: the amount of indices to shift the result
:param the stddev for the gaussian smoothing (in index ... | 5bf614e544dc13bd190c7bb260f3962557d143fd | 2,895 |
def mel_to_hz(mel):
    """From Young et al. "The HTK book", Chapter 5.4."""
    # Inverse of the HTK mel scale: mel = 2595 * log10(1 + hz / 700).
    exponent = mel / 2595.0
    return 700.0 * (10.0 ** exponent - 1.0)
def create_app(path=None, user_content=False, context=None, username=None,
password=None, render_offline=False, render_wide=False,
render_inline=False, api_url=None, title=None, text=None,
autorefresh=None, quiet=None, grip_class=None):
"""
Creates an Grip applicatio... | 4a1c46677a71b18f3369f9158eacf5050ca85f87 | 2,897 |
import subprocess
import re
def get_simulator_version():
""" Get the installed version of XPP
Returns:
:obj:`str`: version
"""
result = subprocess.run(["xppaut", "-version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
if result.returncode != 0:
raise RuntimeError... | 93949c25d79553a3cab361f3d156f723d52d5560 | 2,898 |
def enumerate_joint(variables, e, P):
"""Return the sum of those entries in P consistent with e,
provided variables is P's remaining variables (the ones not in e)."""
if not variables:
return P[e]
Y, rest = variables[0], variables[1:]
return sum([enumerate_joint(rest, extend(e, Y, y), P)
... | 649dfdf0b913f7c4fb74d18d73cd8684356d4418 | 2,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.