complexity int64 1 139 | fun_name stringlengths 1 80 | code stringlengths 101 62.2k | commit_id stringlengths 40 40 | ast_errors stringlengths 0 3.11k | ast_levels int64 6 36 | file_name stringlengths 5 79 | n_ast_nodes int64 17 19.2k | commit_message stringlengths 3 15.3k | d_id int64 12 121k | n_ast_errors int64 0 9 | n_whitespaces int64 4 10.8k | token_counts int64 5 3.06k | vocab_size int64 4 1.11k | id int64 20 338k | n_words int64 4 4.82k | repo stringlengths 3 22 | n_identifiers int64 2 176 | path stringlengths 7 134 | language stringclasses 1
value | nloc int64 1 413 | documentation dict | url stringlengths 31 59 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2 | call_bc | def call_bc(self, other_args):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="bc",
description=,
)
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPO... | ea964109d654394cc0a5237e6ec5510ba6404097 | 11 | dd_controller.py | 97 | Crypto menu refactor (#1119)
* enabled some crypto commands in dd to be called independent of source loaded
* support for coin_map_df in all dd functions + load ta and plot chart refactor
* updated tests and removed coingecko scrapping where possible
* removed ref of command from hugo
* updated pycoingecko... | 83,548 | 0 | 130 | 61 | 20 | 281,136 | 22 | OpenBBTerminal | 18 | gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py | Python | 15 | {
"docstring": "Process bc command\n Blockchain explorers URLs for loaded coin. Those are sites like etherescan.io or polkascan.io\n in which you can see all blockchain data e.g. all txs, all tokens, all contracts...\n ",
"language": "en",
"n_whitespaces": 84,
... | https://github.com/OpenBB-finance/OpenBBTerminal.git | |
1 | communicate | async def communicate(self):
assert self._input.is_file()
self._output.open("w").close()
return (None, None)
| fa2ad657482aca9dc628e6d7062b8badf2706bb6 | 10 | conftest.py | 57 | v4 init | 5,330 | 0 | 37 | 32 | 9 | 30,126 | 9 | spotify-downloader | 7 | tests/conftest.py | Python | 4 | {
"docstring": "\n Ensure that the file has been download, and create empty output file,\n to avoid infinite loop.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 16
} | https://github.com/spotDL/spotify-downloader.git | |
1 | test_multi_trial_reuse_with_failing | def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "2"
register_trainable("foo2", MyResettableClass)
[trial1, trial2, trial3, trial4] = tune.run(
"foo2",
config={
"fail": tune.grid_search([False, True, False, False])... | 1510fb2cd631b2776092fb45ee4082e5e65f16f8 | 15 | test_actor_reuse.py | 183 | [air/tune] Internal resource management 2 - Ray Tune to use new Ray AIR resource manager (#30016)
Includes/depends on #30777
TLDR: This PR refactors Ray Tune's resource management to use a central AIR resource management package instead of the tightly coupled PlacementGroupManager.
Ray Tune's resource management... | 31,314 | 0 | 141 | 113 | 36 | 138,092 | 42 | ray | 19 | python/ray/tune/tests/test_actor_reuse.py | Python | 17 | {
"docstring": "Test that failing trial's actors are not reused.\n\n - 2 trials can run at the same time\n - Trial 1 succeeds, trial 2 fails\n - Trial 3 will be scheduled after trial 2 failed, so won't reuse actor\n - Trial 4 will be scheduled after trial 1 succeeded, so will reuse actor\n ",
"langua... | https://github.com/ray-project/ray.git | |
4 | size_bytes | def size_bytes(self) -> int:
size = 0
has_size = False
for m in self.get_metadata():
if m.size_bytes is not None:
has_size = True
size += m.size_bytes
if not has_size:
return -1
else:
return size
| b5b4460932505912d88d65134931e0da170fb467 | 11 | block_list.py | 84 | Support creating a DatasetPipeline windowed by bytes (#22577) | 33,465 | 0 | 138 | 50 | 24 | 145,482 | 33 | ray | 7 | python/ray/data/impl/block_list.py | Python | 12 | {
"docstring": "Returns the total size in bytes of the blocks, or -1 if not known.",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | https://github.com/ray-project/ray.git | |
2 | unregister | def unregister(self, name):
if name in self._BUILTIN_COLOR_SEQUENCES:
raise ValueError(
f"Cannot unregister builtin color sequence {name!r}")
self._color_sequences.pop(name, None)
_color_sequences = ColorSequenceRegistry()
| 0abe0ce2f2748d1d0383154d045da3609a4b871b | 11 | colors.py | 65 | Add a registry for color sequences
Color sequences are simply lists of colors, that we store by name in
a registry. The registry is modelled similar to the ColormapRegistry
to 1) support immutable builtin color sequences and 2) to return copies
so that one cannot mess with the global definition of the color sequence
t... | 23,145 | 0 | 66 | 31 | 20 | 108,335 | 20 | matplotlib | 8 | lib/matplotlib/colors.py | Python | 5 | {
"docstring": "\n Remove a sequence from the registry.\n\n You cannot remove built-in color sequences.\n\n If the name is not registered, returns with no error.\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 22,
"vocab_size": 21
} | https://github.com/matplotlib/matplotlib.git | |
10 | source_from_cache | def source_from_cache(path):
if sys.implementation.cache_tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
path = _os.fspath(path)
head, pycache_filename = _path_split(path)
found_in_pycache_prefix = False
if sys.pycache_prefix is not None:
stripped_... | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 15 | _bootstrap_external.py | 378 | add python 3.10.4 for windows | 55,140 | 0 | 366 | 212 | 79 | 218,114 | 121 | XX-Net | 33 | python3.10.4/Lib/importlib/_bootstrap_external.py | Python | 30 | {
"docstring": "Given the path to a .pyc. file, return the path to its .py file.\n\n The .pyc file does not need to exist; this simply returns the path to\n the .py file calculated to correspond to the .pyc file. If path does\n not conform to PEP 3147/488 format, ValueError will be raised. If\n sys.imple... | https://github.com/XX-net/XX-Net.git | |
1 | test_deps_sorted | def test_deps_sorted(self):
from airflow.operators.empty import EmptyOperator
from airflow.sensors.external_task import ExternalTaskSensor
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_deps_sorted", start_date=execution_date) as dag:
task1 = ExternalTa... | 49e336ae0302b386a2f47269a6d13988382d975f | 12 | test_dag_serialization.py | 186 | Replace usage of `DummyOperator` with `EmptyOperator` (#22974)
* Replace usage of `DummyOperator` with `EmptyOperator` | 9,201 | 0 | 256 | 109 | 40 | 47,665 | 49 | airflow | 25 | tests/serialization/test_dag_serialization.py | Python | 21 | {
"docstring": "\n Tests serialize_operator, make sure the deps is in order\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | https://github.com/apache/airflow.git | |
1 | load_workflow_status | def load_workflow_status(self):
return self._status_storage.load_workflow_status(self._workflow_id)
| f67871c1f7e79adc727b2a15311d9332832d2e8a | 8 | workflow_storage.py | 30 | [workflow] Fast workflow indexing (#24767)
* workflow indexing
* simplify workflow storage API
* Only fix workflow status when updating the status.
* support status filter | 31,892 | 0 | 18 | 17 | 4 | 140,203 | 4 | ray | 4 | python/ray/workflow/workflow_storage.py | Python | 2 | {
"docstring": "Load workflow status. If we find the previous status updating failed,\n fix it with redo-log transaction recovery.",
"language": "en",
"n_whitespaces": 23,
"n_words": 17,
"vocab_size": 17
} | https://github.com/ray-project/ray.git | |
3 | isCPythonOfficialPackage | def isCPythonOfficialPackage():
# For macOS however, it's very knowable.
if isMacOS() and sys.executable.startswith(
"/Library/Frameworks/Python.framework/Versions/"
):
return True
return False
| c723f658e8c11ec92d6ef90c2f42527c67d3f318 | 9 | PythonFlavors.py | 44 | Added CPython Official flavor, so far only detected on macOS | 42,789 | 0 | 48 | 23 | 18 | 178,678 | 19 | Nuitka | 5 | nuitka/PythonFlavors.py | Python | 6 | {
"docstring": "Official CPython download, kind of hard to detect since self-compiled doesn't change much.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | https://github.com/Nuitka/Nuitka.git | |
2 | update_existing_attachments | def update_existing_attachments(job):
# Patch attachments that were ingested on the standalone path.
with sentry_sdk.start_span(op="tasks.post_process_group.update_existing_attachments"):
try:
from sentry.models import EventAttachment
event = job["event"]
Event... | bc59434031930199dcdc056943c2ba4a17bbd5c8 | 15 | post_process.py | 116 | ref(perf-issues): Modularize post_process_group (ISP-11) (#39594)
Fully modularizes `post_process_group` as final step before adding
multiple event types to it. | 18,118 | 0 | 126 | 66 | 33 | 86,527 | 33 | sentry | 18 | src/sentry/tasks/post_process.py | Python | 10 | {
"docstring": "\n Attaches the group_id to all event attachments that were either:\n\n 1) ingested prior to the event via the standalone attachment endpoint.\n 2) part of a different group before reprocessing started.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 30,
"vocab_size": 26
} | https://github.com/getsentry/sentry.git | |
1 | list | def list(self, ignore_patterns):
raise NotImplementedError(
"subclasses of BaseFinder must provide a list() method"
)
| 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 8 | finders.py | 25 | Refs #33476 -- Reformatted code with Black. | 50,701 | 0 | 46 | 13 | 14 | 204,338 | 14 | django | 4 | django/contrib/staticfiles/finders.py | Python | 4 | {
"docstring": "\n Given an optional list of paths to ignore, return a two item iterable\n consisting of the relative path and storage instance.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 21,
"vocab_size": 20
} | https://github.com/django/django.git | |
2 | test_less_jobs | def test_less_jobs(self, api, started_job, batch):
jobs = [started_job for _ in range(49)]
update_in_batch(api=api, jobs=jobs)
assert started_job.update_job.call_count == 49
assert len(api.new_batch.return_value) == 49
batch.execute.assert_called_once()
| a3aae8017a0a40ff2006e2567f71dccb04c997a5 | 10 | test_async_job.py | 93 | 🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)
* Facebook Marketing performance improvement
* add comments and little refactoring
* fix integration tests with the new config
* improve job status handling, limit concurrency to 10
* fix campaign jobs, refactor manager
* big refactori... | 563 | 0 | 65 | 60 | 20 | 3,800 | 23 | airbyte | 16 | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_async_job.py | Python | 6 | {
"docstring": "Should update all jobs when number of jobs less than max size of batch",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 12
} | https://github.com/airbytehq/airbyte.git | |
1 | test_load_global_local_flag_config | def test_load_global_local_flag_config(self):
global_config =
local_config =
global_config_path = "/mock/home/folder/.streamlit/config.toml"
local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml")
global_open = mock_open(read_data=global_config)
... | dd9084523e365e637443ea351eaaaa25f52d8412 | 13 | config_test.py | 292 | Report sharing removal (#4260)
The report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. T... | 26,359 | 0 | 257 | 163 | 58 | 118,684 | 70 | streamlit | 26 | lib/tests/streamlit/config_test.py | Python | 31 | {
"docstring": "Test that CLI flags have higher priority than both\n ~/.streamlit/config.toml and $CWD/.streamlit/config.toml at parse time.\n \n [theme]\n base = \"dark\"\n font = \"sans serif\"\n textColor = \"#FFFFFF\"\n \n [theme]\n base = \"light\"\n... | https://github.com/streamlit/streamlit.git | |
2 | test_pickle_binary_object_compression | def test_pickle_binary_object_compression(compression):
df = tm.makeDataFrame()
# reference for compression
with tm.ensure_clean() as path:
df.to_pickle(path, compression=compression)
reference = Path(path).read_bytes()
# write
buffer = io.BytesIO()
df.to_pickle(buffer, co... | 864729813a0203af8bb0d30b6c883588ae2c96f8 | 12 | test_pickle.py | 188 | ENH: add support for reading .tar archives (#44787)
* Add reproduction test for .tar.gz archives
co-authored-by: Margarete Dippel <margarete01@users.noreply.github.com>
* add support for .tar archives
python's `tarfile` supports gzip, xz and bz2 encoding,
so we don't need to make any special cases for that.
... | 39,808 | 0 | 114 | 109 | 44 | 166,376 | 57 | pandas | 20 | pandas/tests/io/test_pickle.py | Python | 12 | {
"docstring": "\n Read/write from binary file-objects w/wo compression.\n\n GH 26237, GH 29054, and GH 29570\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 13,
"vocab_size": 11
} | https://github.com/pandas-dev/pandas.git | |
5 | get_filtering | def get_filtering(self):
self.select_date_form = SelectDateForm(self.request.GET)
result = {}
if self.select_date_form.is_valid():
date_from = self.select_date_form.cleaned_data.get('date_from')
date_to = self.select_date_form.cleaned_data.get('date_to')
... | de3fcba9e95818e9634ab7de6bfcb1f4221f2775 | 15 | views.py | 174 | Fix warnings from flake8-comprehensions. | 15,592 | 0 | 265 | 100 | 42 | 70,980 | 58 | wagtail | 15 | wagtail/contrib/forms/views.py | Python | 15 | {
"docstring": " Return filering as a dict for submissions queryset ",
"language": "en",
"n_whitespaces": 9,
"n_words": 8,
"vocab_size": 8
} | https://github.com/wagtail/wagtail.git | |
9 | get_so_reservation_for_item | def get_so_reservation_for_item(args):
reserved_so = None
if args.get("against_sales_order"):
if get_reserved_qty_for_so(args.get("against_sales_order"), args.get("item_code")):
reserved_so = args.get("against_sales_order")
elif args.get("against_sales_invoice"):
sales_order = frappe.db.sql(
,
(args.get... | 494bd9ef78313436f0424b918f200dab8fc7c20b | 15 | get_item_details.py | 253 | style: format code with black | 14,622 | 0 | 25 | 146 | 26 | 67,799 | 42 | erpnext | 9 | erpnext/stock/get_item_details.py | Python | 18 | {
"docstring": "select sales_order from `tabSales Invoice Item` where\n\t\tparent=%s and item_code=%s",
"language": "en",
"n_whitespaces": 8,
"n_words": 10,
"vocab_size": 10
} | https://github.com/frappe/erpnext.git | |
1 | test_short_term_login_token | def test_short_term_login_token(self):
token = self.macaroon_generator.generate_short_term_login_token(
user_id="@user:tesths",
auth_provider_id="oidc",
auth_provider_session_id="sid",
duration_in_ms=2 * 60 * 1000,
)
info = self.macaroon_... | fe1daad67237c2154a3d8d8cdf6c603f0d33682e | 11 | test_macaroons.py | 233 | Move the "email unsubscribe" resource, refactor the macaroon generator & simplify the access token verification logic. (#12986)
This simplifies the access token verification logic by removing the `rights`
parameter which was only ever used for the unsubscribe link in email
notifications. The latter has been moved un... | 72,365 | 0 | 240 | 135 | 39 | 248,585 | 55 | synapse | 17 | tests/util/test_macaroons.py | Python | 18 | {
"docstring": "Test the generation and verification of short-term login tokens",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | https://github.com/matrix-org/synapse.git | |
6 | polarity_scores | def polarity_scores(self, text):
# text, words_and_emoticons, is_cap_diff = self.preprocess(text)
sentitext = SentiText(
text, self.constants.PUNC_LIST, self.constants.REGEX_REMOVE_PUNCTUATION
)
sentiments = []
words_and_emoticons = sentitext.words_and_emotic... | 74bb3c28ce9f2cd2be4cd9176747d59a0d67285d | 15 | vader.py | 218 | Add a note stating that a hashtag is unsupported in VADER | 7,656 | 0 | 274 | 138 | 53 | 42,601 | 70 | nltk | 21 | nltk/sentiment/vader.py | Python | 19 | {
"docstring": "\n Return a float for sentiment strength based on the input text.\n Positive values are positive valence, negative value are negative\n valence.\n\n :note: Hashtags are not taken into consideration (e.g. #BAD is neutral). If you\n are interested in processing the... | https://github.com/nltk/nltk.git | |
1 | test_mutating_input_arrays_y_and_z | def test_mutating_input_arrays_y_and_z(fig_test, fig_ref):
ax1 = fig_test.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax1.plot(x, y, z, 'o-')
ax1.set_ylim([0, 4])
ax1.set_zlim([0, 4])
fig_test.draw_without_rendering()
# mutate y,z to... | 7a1df7830f7685a99291d90c5e79bfc5e7876f31 | 10 | test_axes3d.py | 277 | Test that plot results aren't affected by mutating input arrays | 24,166 | 0 | 150 | 208 | 46 | 110,450 | 87 | matplotlib | 14 | lib/mpl_toolkits/mplot3d/tests/test_axes3d.py | Python | 19 | {
"docstring": "\n Test to see if the `z` axis does not get mutated\n after a call to `Axes3D.plot`\n\n test cases came from GH#8990\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 21,
"vocab_size": 20
} | https://github.com/matplotlib/matplotlib.git | |
4 | dag_edges | def dag_edges(dag):
# Edges to add between TaskGroup
edges_to_add = set()
# Edges to remove between individual tasks that are replaced by edges_to_add.
edges_to_skip = set()
task_group_map = dag.task_group.get_task_group_dict()
| bb26f96665567325a7fbb810249820e7dac0322a | 9 | views.py | 48 | Make Grid and and Graph view work with task mapping (#21740)
* Expand mapped tasks in the Scheduler
Technically this is done inside
DagRun.task_instance_scheduling_decisions, but the only place that is
currently called is the Scheduler
The way we are getting `upstream_ti` to pass to expand_mapped_task is
all ... | 8,568 | 0 | 47 | 115 | 22 | 45,441 | 29 | airflow | 8 | airflow/www/views.py | Python | 18 | {
"docstring": "\n Create the list of edges needed to construct the Graph view.\n\n A special case is made if a TaskGroup is immediately upstream/downstream of another\n TaskGroup or task. Two dummy nodes named upstream_join_id and downstream_join_id are\n created for the TaskGroup. Instead of drawing an ... | https://github.com/apache/airflow.git | |
1 | require_tokenizers | def require_tokenizers(test_case):
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
| 57e6464ac9a31156f1c93e59107323e6ec01309e | 10 | testing_utils.py | 37 | Update all require decorators to use skipUnless when possible (#16999) | 6,797 | 0 | 13 | 20 | 7 | 37,492 | 7 | transformers | 5 | src/transformers/testing_utils.py | Python | 2 | {
"docstring": "\n Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 17,
"vocab_size": 16
} | https://github.com/huggingface/transformers.git | |
5 | current_operation | def current_operation(self) -> str | None:
modbus_control = self.device.states[OverkizState.MODBUS_CONTROL_DHW]
if modbus_control and modbus_control.value_as_str == OverkizCommandParam.STOP:
return STATE_OFF
current_mode = self.device.states[OverkizState.MODBUS_DHW_MODE]
... | 1c0f9cf941f77d6e3d299f98d5174f0a2953f236 | 9 | hitachi_dhw.py | 102 | Add Overkiz Hitachi DHW (#81536)
* Port ha-tahome hitachi dhw
* Use int for setting temperature
* Use value as float when possible
* Use device state for current operation
* Update homeassistant/components/overkiz/water_heater_entities/hitachi_dhw.py
Co-authored-by: Quentame <polletquentin74@me.com>
... | 90,732 | 0 | 94 | 65 | 23 | 291,628 | 30 | core | 15 | homeassistant/components/overkiz/water_heater_entities/hitachi_dhw.py | Python | 9 | {
"docstring": "Return current operation ie. eco, electric, performance, ...",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | https://github.com/home-assistant/core.git | |
4 | add_provs | def add_provs(self, reader):
fileids = reader.fileids()
for fileid in fileids:
prov, langfile = os.path.split(fileid)
file_name, file_extension = os.path.splitext(langfile)
if file_extension == ".tab":
lang = file_name.split("-")[-1]
... | 8ffd0d8190552d45f8b92e18da3fc41639e5185d | 14 | wordnet.py | 150 | Initialize empty provenance for default English | 7,546 | 0 | 210 | 84 | 41 | 42,453 | 54 | nltk | 16 | nltk/corpus/reader/wordnet.py | Python | 10 | {
"docstring": "Add languages from Multilingual Wordnet to the provenance dictionary",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | https://github.com/nltk/nltk.git | |
2 | site_data_dir | def site_data_dir(self) -> str:
# XDG default for $XDG_DATA_DIRS; only first, if multipath is False
path = os.environ.get("XDG_DATA_DIRS", "")
if not path.strip():
path = f"/usr/local/share{os.pathsep}/usr/share"
return self._with_multi_path(path)
| f3166e673fe8d40277b804d35d77dcdb760fc3b3 | 11 | unix.py | 78 | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | 3,281 | 0 | 73 | 39 | 24 | 20,229 | 27 | pipenv | 10 | pipenv/patched/notpip/_vendor/platformdirs/unix.py | Python | 10 | {
"docstring": "\n :return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is\n enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS\n path separator), e.g. ``/usr/local/share/$appname/$version`... | https://github.com/pypa/pipenv.git | |
3 | absorbing_probabilities | def absorbing_probabilities(self):
_, _, R, _ = self.decompose()
N = self.fundamental_matrix()
if R is None or N is None:
return None
return N*R
| 7fe8e027ae1d7f683243c0229b961671a6cbb4c5 | 8 | stochastic_process_types.py | 67 | Improved some documentation in the stats module | 48,618 | 0 | 69 | 41 | 17 | 197,540 | 23 | sympy | 7 | sympy/stats/stochastic_process_types.py | Python | 6 | {
"docstring": "\n Computes the absorbing probabilities, i.e.\n the ij-th entry of the matrix denotes the\n probability of Markov chain being absorbed\n in state j starting from state i.\n ",
"language": "en",
"n_whitespaces": 62,
"n_words": 26,
"vocab_size": 21
} | https://github.com/sympy/sympy.git | |
2 | doctype_matches | def doctype_matches(text, regex):
m = doctype_lookup_re.search(text)
if m is None:
return False
doctype = m.group(1)
return re.compile(regex, re.I).match(doctype.strip()) is not None
| f3166e673fe8d40277b804d35d77dcdb760fc3b3 | 11 | util.py | 87 | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | 3,407 | 0 | 43 | 54 | 17 | 20,520 | 21 | pipenv | 13 | pipenv/patched/notpip/_vendor/pygments/util.py | Python | 6 | {
"docstring": "Check if the doctype matches a regular expression (if present).\n\n Note that this method only checks the first part of a DOCTYPE.\n eg: 'html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"'\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 29,
"vocab_size": 27
} | https://github.com/pypa/pipenv.git | |
1 | test_converts_stats_period_start_end | def test_converts_stats_period_start_end(self):
payload = self.make_payload("discover", {"statsPeriodStart": "1w", "statsPeriodEnd": "5d"})
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data... | 096b5511e244eecd8799b2a0324655207ce8985e | 12 | test_data_export.py | 205 | ref(tests): Remove `get_valid_response()` (#34822) | 19,764 | 0 | 166 | 114 | 32 | 100,170 | 49 | sentry | 18 | tests/sentry/data_export/endpoints/test_data_export.py | Python | 15 | {
"docstring": "\n Ensures that statsPeriodStart and statsPeriodEnd is converted to start/end.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | https://github.com/getsentry/sentry.git | |
3 | response_add | def response_add(self, request, obj, post_url_continue=None):
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add an... | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 11 | admin.py | 102 | Refs #33476 -- Reformatted code with Black. | 50,467 | 0 | 155 | 61 | 52 | 203,593 | 77 | django | 9 | django/contrib/auth/admin.py | Python | 5 | {
"docstring": "\n Determine the HttpResponse for the add_view stage. It mostly defers to\n its superclass implementation but is customized because the User model\n has a slightly different workflow.\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 26,
"vocab_size": 24
} | https://github.com/django/django.git | |
1 | test_decimal_conversion_more_digits | def test_decimal_conversion_more_digits():
formatted = format_target_temperature("16.09")
assert formatted == "16.1"
| b0ed42a5a58976ebe82b5bbbb60c499648a1718b | 9 | test_temperature_format.py | 32 | Fix #69952: Daikin AC Temperature jumps after being set (#70326) | 95,665 | 0 | 18 | 15 | 8 | 296,690 | 9 | core | 3 | tests/components/daikin/test_temperature_format.py | Python | 3 | {
"docstring": "Check at most 1 decimal is kept when target temp is a decimal with more than 1 decimal.",
"language": "en",
"n_whitespaces": 17,
"n_words": 18,
"vocab_size": 15
} | https://github.com/home-assistant/core.git | |
1 | get_task_chosen_response | def get_task_chosen_response(request, task):
result_data = {
'id': task.id,
'name': task.name,
'edit_url': reverse('wagtailadmin_workflows:edit_task', args=[task.id]),
}
return render_modal_workflow(
request, None, None,
None, json_data={'step': 'task_chosen', 'r... | 60ba39ffb5ec6d760efa6e2ecbff7ede53b12464 | 13 | workflows.py | 103 | replace get_task_result_data helper with more useful one get_task_chosen_response | 15,531 | 0 | 75 | 62 | 23 | 70,608 | 25 | wagtail | 10 | wagtail/admin/views/workflows.py | Python | 10 | {
"docstring": "\n helper function: given a task, return the response indicating that it has been chosen\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 14,
"vocab_size": 14
} | https://github.com/wagtail/wagtail.git | |
5 | min_weight_matching | def min_weight_matching(G, maxcardinality=None, weight="weight"):
if maxcardinality not in (True, None):
raise nx.NetworkXError(
"The argument maxcardinality does not make sense "
"in the context of minimum weight matchings."
"It is deprecated and will be removed in ... | c3e1e7f4c6a4edb968494cd4775574ad26f2a96b | @not_implemented_for("multigraph")
@not_implemented_for("directed") | 11 | matching.py | 231 | Fix min_weight_matching to convert edge weights without reciprocal (#5394)
* Add test and then fix code and docs
* Correct and improve docs. Change 1e-6 to 1 to maintain integers.
Include argument in docstring for why adding the 1 doesn't impact the min | 41,940 | 1 | 163 | 137 | 65 | 176,513 | 84 | networkx | 22 | networkx/algorithms/matching.py | Python | 15 | {
"docstring": "Computing a minimum-weight maximal matching of G.\n\n Use the maximum-weight algorithm with edge weights subtracted\n from the maximum weight of all edges.\n\n A matching is a subset of edges in which no node occurs more than once.\n The weight of a matching is the sum of the weights of it... | https://github.com/networkx/networkx.git |
8 | _find_agent_ip | def _find_agent_ip(vm_, vmid):
# This functionality is only available on qemu
if not vm_.get("technology") == "qemu":
log.warning("Find agent IP is only available under `qemu`")
return
# Create an empty list of IP-addresses:
ips = []
endpoint = "nodes/{}/qemu/{}/agent/network... | a5679caf65c7c79cd72841b6e5793b9b693744c9 | 15 | proxmox.py | 231 | Add support for get IP-address from agent | 54,362 | 0 | 254 | 128 | 78 | 216,056 | 106 | salt | 19 | salt/cloud/clouds/proxmox.py | Python | 19 | {
"docstring": "\n If VM is started we would return the IP-addresses that are returned by the qemu agent on the VM.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 19,
"vocab_size": 17
} | https://github.com/saltstack/salt.git | |
def test_get_name_error():
    """get_name with an unmapped SID should raise a CommandExecutionError."""
    # An arbitrary SID that is not mapped to any account on the system.
    unmapped_sid = win32security.ConvertStringSidToSid("S-1-2-3-4")
    with pytest.raises(salt.exceptions.CommandExecutionError) as exc:
        salt.utils.win_dacl.get_name(unmapped_sid)
    assert "No mapping between account names" in exc.value.message
| 3bb43882e727b1d36abe2e501759c9c5e9048ecf | 12 | test_get_name.py | 87 | Add tests, migrate some tests to pytest | 54,131 | 0 | 43 | 48 | 20 | 215,737 | 21 | salt | 16 | tests/pytests/unit/utils/win_dacl/test_get_name.py | Python | 6 | {
"docstring": "\n Test get_name with an un mapped SID, should throw a CommandExecutionError\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 11,
"vocab_size": 11
} | https://github.com/saltstack/salt.git | |
2 | get_relations | def get_relations(self, cursor, table_name):
cursor.execute(
"PRAGMA foreign_key_list(%s)" % self.connection.ops.quote_name(table_name)
)
return {
column_name: (ref_column_name, ref_table_name)
for _, _, ref_table_name, column_name, ref_column_name, *... | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 12 | introspection.py | 85 | Refs #33476 -- Reformatted code with Black. | 51,029 | 0 | 93 | 56 | 24 | 205,195 | 25 | django | 13 | django/db/backends/sqlite3/introspection.py | Python | 8 | {
"docstring": "\n Return a dictionary of {column_name: (ref_column_name, ref_table_name)}\n representing all foreign keys in the given table.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 15
} | https://github.com/django/django.git | |
3 | average_items_per_ms | def average_items_per_ms(self) -> Optional[float]:
# We want to return None if this is the first background update item
if self.total_item_count == 0:
return None
# Avoid dividing by zero
elif self.avg_duration_ms == 0:
return 0
else:
... | 26211fec24d8d0a967de33147e148166359ec8cb | 12 | background_updates.py | 78 | Fix a bug in background updates wherein background updates are never run using the default batch size (#12157) | 71,674 | 0 | 158 | 45 | 47 | 247,442 | 61 | synapse | 7 | synapse/storage/background_updates.py | Python | 11 | {
"docstring": "An estimate of how long it takes to do a single update.\n Returns:\n A duration in ms as a float\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 20,
"vocab_size": 19
} | https://github.com/matrix-org/synapse.git | |
2 | _pause_and_wait_for_callback | async def _pause_and_wait_for_callback(self):
self._pause_requested = True
await self.async_media_pause()
try: | 26251895295d74fcd2c73e37804c23675c433247 | async def _pause_and_wait_for_callback(self):
"""Send pause and wait for the pause callback to be received."""
self._pause_requested = True
await self.async_media_pause()
try: | 7 | media_player.py | 34 | Use async_timeout in forked_daapd (#78451) | 106,437 | 1 | 37 | 53 | 9 | 307,669 | 9 | core | 4 | homeassistant/components/forked_daapd/media_player.py | Python | 9 | {
"docstring": "Send pause and wait for the pause callback to be received.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | https://github.com/home-assistant/core.git |
1 | set_options | def set_options(icon=None, button_color=None, element_size=(None, None), button_element_size=(None, None),
margins=(None, None),
element_padding=(None, None), auto_size_text=None, auto_size_buttons=None, font=None, border_width=None,
slider_border_width=None, slider_relie... | 07bb93d47f01468660a01f42150e87e5cb08d546 | 16 | PySimpleGUI.py | 19,192 | Addition of tooltip_offset parm to set_options call (major hack to get around 8.6.12 problem). Backed out the experiments to try and fix new problem with Ubuntu | 53,473 | 0 | 10,839 | 255 | 1,112 | 212,865 | 4,824 | PySimpleGUI | 131 | PySimpleGUI.py | Python | 14 | {
"docstring": "\n :param icon: Can be either a filename or Base64 value. For Windows if filename, it MUST be ICO format. For Linux, must NOT be ICO. Most portable is to use a Base64 of a PNG file. This works universally across all OS's\n :type icon: bytes ... | https://github.com/PySimpleGUI/PySimpleGUI.git | |
1 | test_load_with_supervisor_without_diagnostics | async def test_load_with_supervisor_without_diagnostics(hass):
analytics = Analytics(hass)
analytics._data.preferences[ATTR_DIAGNOSTICS] = True
assert analytics.preferences[ATTR_DIAGNOSTICS]
with patch(
"homeassistant.components.hassio.get_supervisor_info",
side_effect=Mock(return... | 46500beefcccd8106718a8172a5078bbe5579765 | 16 | test_analytics.py | 132 | Enable strict typing of analytics (#83119) | 95,933 | 0 | 85 | 78 | 22 | 296,961 | 26 | core | 12 | tests/components/analytics/test_analytics.py | Python | 13 | {
"docstring": "Test loading with a supervisor that has not diagnostics enabled.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | https://github.com/home-assistant/core.git | |
3 | test_issue_message_builder | def test_issue_message_builder(self):
self.event1.data["metadata"].update({"value": "some error"})
self.group1.data["metadata"].update({"value": "some error"})
self.event1.data["type"] = self.group1.data["type"] = "error"
issue_card = build_group_card(
group=self.group1, eve... | db35e231ceababe8c9f5ca7b5d2ca685f07c7d5b | 11 | test_message_builder.py | 694 | test(msteams): Add tests for building group card (#36834)
Add tests for build_group_card which builds issues cards. Does NOT test all visual aspects of the card. Only ensures that certain important elements are present and the basic structure of the card is correct. | 18,974 | 0 | 581 | 402 | 110 | 93,204 | 176 | sentry | 41 | tests/sentry/integrations/msteams/test_message_builder.py | Python | 60 | {
"docstring": "\\{\\{ # {{\n DATE\\( # DATE(\n [0-9T+:\\-]+,\\ SHORT # 2022-07-14T19:30:34, SHORT\n \\) # )\n \\}\\} # }}\n \\ # whitespace\n ... | https://github.com/getsentry/sentry.git | |
7 | cache_key | def cache_key(self, template_name, skip=None):
skip_prefix = ""
if skip:
matching = [
origin.name for origin in skip if origin.template_name == template_name
]
if matching:
skip_prefix = self.generate_hash(matching)
r... | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 12 | cached.py | 106 | Refs #33476 -- Reformatted code with Black. | 51,474 | 0 | 127 | 66 | 28 | 206,296 | 36 | django | 12 | django/template/loaders/cached.py | Python | 9 | {
"docstring": "\n Generate a cache key for the template name and skip.\n\n If skip is provided, only origins that match template_name are included\n in the cache key. This ensures each template is only parsed and cached\n once if contained in different extend chains like:\n\n x... | https://github.com/django/django.git | |
def isNuitkaPython():
    """Check whether this interpreter is our own fork of CPython named Nuitka-Python."""
    # spell-checker: ignore nuitkapython
    # Python 2 exposes the fork name via sys.subversion; Python 3 via
    # sys.implementation.
    if python_version < 0x300:
        return sys.subversion[0] == "nuitkapython"
    return sys.implementation.name == "nuitkapython"
_is_anaconda = None
| 77e7c06c0f9c5c0735b5a65c72abcd243d8e3640 | 11 | PythonFlavors.py | 59 | Minor cleanups | 42,801 | 0 | 47 | 29 | 19 | 178,712 | 22 | Nuitka | 7 | nuitka/PythonFlavors.py | Python | 5 | {
"docstring": "Is this our own fork of CPython named Nuitka-Python.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | https://github.com/Nuitka/Nuitka.git | |
3 | handle_pip_version_check | def handle_pip_version_check(self, options):
# type: (Values) -> None
# Make sure the index_group options are present.
assert hasattr(options, "no_index")
if options.disable_pip_version_check or options.no_index:
return
# Otherwise, check if we're using the... | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | 12 | req_command.py | 117 | upd; format | 12,206 | 0 | 158 | 57 | 50 | 60,555 | 55 | transferlearning | 17 | .venv/lib/python3.8/site-packages/pip/_internal/cli/req_command.py | Python | 9 | {
"docstring": "\n Do the pip version check if not disabled.\n\n This overrides the default behavior of not doing the check.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 15
} | https://github.com/jindongwang/transferlearning.git | |
22 | submit_row | def submit_row(context):
add = context["add"]
change = context["change"]
is_popup = context["is_popup"]
save_as = context["save_as"]
show_save = context.get("show_save", True)
show_save_and_add_another = context.get("show_save_and_add_another", True)
show_save_and_continue = context.get... | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | @register.tag(name="submit_row") | 15 | admin_modify.py | 380 | Refs #33476 -- Reformatted code with Black. | 50,412 | 1 | 457 | 213 | 64 | 203,500 | 131 | django | 24 | django/contrib/admin/templatetags/admin_modify.py | Python | 49 | {
"docstring": "\n Display the row of buttons for delete and save.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 9,
"vocab_size": 9
} | https://github.com/django/django.git |
def test_ridgecv_normalize_deprecated(Estimator):
    """Check that the normalize deprecation warning mentions the rescaling of alphas.

    Non-regression test for issue #22540.
    """
    X = np.array([[1, -1], [1, 1]])
    y = np.array([0, 1])
    expected_warning = r"Set parameter alphas to: original_alphas \* n_samples"
    with pytest.warns(FutureWarning, match=expected_warning):
        Estimator(normalize=True).fit(X, y)
| f14af688b7e77ecb6df9dfee93ec39b6c0334b86 | 11 | test_ridge.py | 108 | FIX Make Ridge*CV warn about rescaling alphas with scaling (#22585) | 75,551 | 0 | 60 | 68 | 26 | 259,066 | 28 | scikit-learn | 13 | sklearn/linear_model/tests/test_ridge.py | Python | 8 | {
"docstring": "Check that the normalize deprecation warning mentions the rescaling of alphas\n\n Non-regression test for issue #22540\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 16,
"vocab_size": 15
} | https://github.com/scikit-learn/scikit-learn.git | |
async def async_start_charging(self) -> None:
    """Request to start charging the car. Used by the button platform."""
    # start_charging is a blocking call, so run it in the executor.
    await self.hass.async_add_executor_job(self.leaf.start_charging)
    self.schedule_update()
| 10027b20904b678d8baecbc6e72c5bcc3f4f24b2 | 10 | __init__.py | 47 | Add button to start leaf charge (#62948)
Co-authored-by: Bruce Duncan <bwduncan@gmail.com> | 107,548 | 0 | 29 | 26 | 8 | 308,815 | 8 | core | 7 | homeassistant/components/nissan_leaf/__init__.py | Python | 4 | {
"docstring": "Request to start charging the car. Used by the button platform.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | https://github.com/home-assistant/core.git | |
async def async_update(self) -> None:
    """Get the latest data from Smappee and update the state."""
    await self._smappee_base.async_update()
    self._state = self._service_location.is_present
| 0c767bd0d37a41af37728b1d8b4eae8dceb7e188 | 9 | binary_sensor.py | 45 | Improve entity type hints [s] (part 1/2) (#77881) | 105,270 | 0 | 31 | 25 | 10 | 306,486 | 10 | core | 6 | homeassistant/components/smappee/binary_sensor.py | Python | 4 | {
"docstring": "Get the latest data from Smappee and update the state.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | https://github.com/home-assistant/core.git | |
1 | test_mount_half_u_devices | def test_mount_half_u_devices(self):
rack = Rack.objects.first()
attrs = {
'device_type': DeviceType.objects.get(u_height=0.5),
'device_role': DeviceRole.objects.first(),
'site': Site.objects.first(),
'rack': rack,
'face': DeviceFaceCh... | 103729c0855aad2f45fcaa2cf680799236f3e201 | 11 | test_models.py | 196 | Add test for 0.5U devices | 77,999 | 0 | 137 | 121 | 30 | 265,126 | 33 | netbox | 21 | netbox/dcim/tests/test_models.py | Python | 12 | {
"docstring": "\n Check that two 0.5U devices can be mounted in the same rack unit.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 13
} | https://github.com/netbox-community/netbox.git | |
def make_homeserver(self, reactor, clock):
    """Make and return a homeserver suitable for testing.

    Args:
        reactor: A Twisted Reactor, or something that pretends to be one.
        clock: The Clock, associated with the reactor.
    """
    return self.setup_test_homeserver()
| 922b771337f6d14a556fa761c783748f698e924b | 8 | unittest.py | 32 | Add missing type hints for tests.unittest. (#13397) | 72,530 | 0 | 30 | 19 | 8 | 248,955 | 9 | synapse | 6 | tests/unittest.py | Python | 3 | {
"docstring": "\n Make and return a homeserver.\n\n Args:\n reactor: A Twisted Reactor, or something that pretends to be one.\n clock (synapse.util.Clock): The Clock, associated with the reactor.\n\n Returns:\n A homeserver suitable for testing.\n\n Functi... | https://github.com/matrix-org/synapse.git | |
7 | ordered | def ordered(self):
if isinstance(self, EmptyQuerySet):
return True
if self.query.extra_order_by or self.query.order_by:
return True
elif (
self.query.default_ordering
and self.query.get_meta().ordering
and
# A defau... | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 13 | query.py | 103 | Refs #33476 -- Reformatted code with Black. | 51,188 | 0 | 177 | 63 | 29 | 205,746 | 36 | django | 11 | django/db/models/query.py | Python | 14 | {
"docstring": "\n Return True if the QuerySet is ordered -- i.e. has an order_by()\n clause or a default ordering on the model (or is empty).\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 23,
"vocab_size": 21
} | https://github.com/django/django.git | |
def get_invalid_response_data(self, form):
    """Return the JSON response data for an invalid form submission."""
    # Collapse all file-field errors into a single newline-separated message.
    file_errors = form.errors["file"]
    return {"success": False, "error_message": "\n".join(file_errors)}
| d10f15e55806c6944827d801cd9c2d53f5da4186 | 11 | multiple_upload.py | 54 | Reformat with black | 15,887 | 0 | 53 | 29 | 10 | 72,414 | 10 | wagtail | 5 | wagtail/admin/views/generic/multiple_upload.py | Python | 5 | {
"docstring": "\n Return the JSON response data for an invalid form submission\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | https://github.com/wagtail/wagtail.git | |
def path_objects(self):
    """Cache and return the complete path as lists of objects, derived from
    their annotation within the path.
    """
    try:
        # Return the previously computed path if present.
        return self._path_objects
    except AttributeError:
        self._path_objects = self._get_path()
        return self._path_objects
| 6ff2e55ce408f0f7f2fe99129048421c25ecafe6 | 10 | cables.py | 50 | Add origins, destinations properties on CablePath | 77,911 | 0 | 43 | 28 | 10 | 264,915 | 11 | netbox | 5 | netbox/dcim/models/cables.py | Python | 4 | {
"docstring": "\n Cache and return the complete path as lists of objects, derived from their annotation within the path.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 17,
"vocab_size": 16
} | https://github.com/netbox-community/netbox.git | |
7 | gui_repaint | def gui_repaint(self, drawDC=None):
_log.debug("%s - gui_repaint()", type(self))
# The "if self" check avoids a "wrapped C/C++ object has been deleted"
# RuntimeError if doing things after window is closed.
if not (self and self.IsShownOnScreen()):
return
if ... | e1eca0aa8bf0b51009e012cd37d3e95f364d0ee9 | 13 | backend_wx.py | 350 | Expire deprecations in backends | 22,898 | 0 | 448 | 175 | 121 | 107,757 | 155 | matplotlib | 31 | lib/matplotlib/backends/backend_wx.py | Python | 17 | {
"docstring": "\n Update the displayed image on the GUI canvas, using the supplied\n wx.PaintDC device context.\n\n The 'WXAgg' backend sets origin accordingly.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 20,
"vocab_size": 18
} | https://github.com/matplotlib/matplotlib.git | |
4 | copy_safe_request | def copy_safe_request(request):
meta = {
k: request.META[k]
for k in HTTP_REQUEST_META_SAFE_COPY
if k in request.META and isinstance(request.META[k], str)
}
return NetBoxFakeRequest({
'META': meta,
'COOKIES': request.COOKIES,
'POST': request.POST,
... | 540bba4544d9f31c126571cc1a45a6783b3b6a89 | 13 | utils.py | 158 | Closes #10920: Include request cookies when queuing a custom script | 78,322 | 0 | 138 | 97 | 43 | 266,161 | 45 | netbox | 16 | netbox/utilities/utils.py | Python | 16 | {
"docstring": "\n Copy selected attributes from a request object into a new fake request object. This is needed in places where\n thread safe pickling of the useful request data is needed.\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 29,
"vocab_size": 25
} | https://github.com/netbox-community/netbox.git | |
1 | products_for_sorting_with_channels | def products_for_sorting_with_channels(category, channel_USD, channel_PLN):
product_type = ProductType.objects.create(name="Apple", kind=ProductTypeKind.NORMAL)
products = Product.objects.bulk_create(
[
Product(
name="Product1",
slug="prod1",
c... | 3f773c3890aead936949bd6923d2d7f669e1c68f | @pytest.mark.parametrize(
"sort_by",
[
{"field": "PUBLISHED", "direction": "ASC"},
{"field": "PRICE", "direction": "DESC"},
{"field": "MINIMAL_PRICE", "direction": "DESC"},
{"field": "PUBLICATION_DATE", "direction": "DESC"},
],
) | 18 | test_product_filtering_and_sorting_with_channels.py | 1,552 | Add sorting by LAST_MODIFIED_AT field to GraphQL schema (#9245)
* Add sorting by LAST_MODIFIED_AT to new types
* Add LAST_MODIFIED_AT to sorting exported files
* Update schema, fix variant sorter
* Update changelog
* Rebase and update changelog
Co-authored-by: Marcin Gębala <5421321+maarcingebala@users.... | 4,954 | 1 | 2,732 | 991 | 105 | 26,250 | 263 | saleor | 45 | saleor/graphql/product/tests/test_product_filtering_and_sorting_with_channels.py | Python | 196 | {
"docstring": "\n query ($sortBy: ProductOrder, $filter: ProductFilterInput, $channel: String){\n products (\n first: 10, sortBy: $sortBy, filter: $filter, channel: $channel\n ) {\n edges {\n node {\n name\n slug\n ... | https://github.com/saleor/saleor.git |
8 | _try_compile_deployment_target | def _try_compile_deployment_target(self, operator, target):
orig_environ = os.environ
os.environ = orig_environ.copy()
self.addCleanup(setattr, os, 'environ', orig_environ)
if target is None:
if os.environ.get('MACOSX_DEPLOYMENT_TARGET'):
del os.environ['MACO... | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 14 | test_build_ext.py | 500 | add python 3.10.4 for windows | 56,858 | 0 | 704 | 288 | 129 | 223,085 | 196 | XX-Net | 47 | python3.10.4/Lib/distutils/tests/test_build_ext.py | Python | 55 | {
"docstring": "\\\n #include <AvailabilityMacros.h>\n\n int dummy;\n\n #if TARGET %s MAC_OS_X_VERSION_MIN_REQUIRED\n #else\n #error \"Unexpected target\"\n #endif\n\n ",
"language": "en",
"n_whitespaces": 115,
... | https://github.com/XX-net/XX-Net.git | |
1 | test_edit_post_locked_by_self | def test_edit_post_locked_by_self(self):
# Lock the snippet
self.lock_snippet(self.user)
# Try to edit the snippet
response = self.client.post(
self.get_url("edit"),
{"text": "Edited while locked"},
follow=True,
)
self.refresh... | 10dbbddaf35607e4257f50dd960520a1268dd225 | 11 | test_locking.py | 142 | Add tests for locking snippets | 17,037 | 0 | 216 | 77 | 45 | 80,233 | 63 | wagtail | 17 | wagtail/snippets/tests/test_locking.py | Python | 14 | {
"docstring": "A user can edit a snippet that is locked by themselves.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | https://github.com/wagtail/wagtail.git | |
1 | test_jemalloc_env_var_propagate | def test_jemalloc_env_var_propagate():
gcs_ptype = ray.ray_constants.PROCESS_TYPE_GCS_SERVER
expected = {}
actual = ray._private.services.propagate_jemalloc_env_var(
jemalloc_path="", jemalloc_conf="", jemalloc_comps=[], process_type=gcs_ptype
)
assert actual == expected
actual... | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | 12 | test_advanced_4.py | 420 | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 29,488 | 0 | 381 | 258 | 52 | 131,233 | 114 | ray | 20 | python/ray/tests/test_advanced_4.py | Python | 57 | {
"docstring": "Test `propagate_jemalloc_env_var`\n If the shared library path is not specified,\n it should return an empty dict.\n \n When the shared library is specified\n \n When the malloc config is specified\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 28,
"vocab_size": 20... | https://github.com/ray-project/ray.git | |
def create_userconfig(instance, created, raw=False, **kwargs):
    """Automatically create a new UserConfig when a new User is created.

    Skipped when importing a user from a fixture (raw=True).
    """
    # Guard clause: only act on freshly created, non-fixture users.
    if not created or raw:
        return
    preferences = get_config().DEFAULT_USER_PREFERENCES
    UserConfig(user=instance, data=preferences).save()
#
# REST API
#
| 1636508a6ac8df6b93d0ea5c621c174f605fd47a | 13 | models.py | 71 | Fixes #9156: Fix loading UserConfig data from fixtures | 77,785 | 0 | 37 | 42 | 18 | 264,682 | 20 | netbox | 12 | netbox/users/models.py | Python | 4 | {
"docstring": "\n Automatically create a new UserConfig when a new User is created. Skip this if importing a user from a fixture.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 20,
"vocab_size": 16
} | https://github.com/netbox-community/netbox.git | |
6 | validate_attr | def validate_attr(self, append) -> None:
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if existing_fields is not None and existing_fields != list(self.values):
raise ValueError("appended items do not match existing items in table!")
... | 7d2f9b8d59908fbf57c6453bc41891efbfe981a6 | 12 | pytables.py | 124 | TYP: some return annotations in pytables.py (#47512) | 39,982 | 0 | 181 | 78 | 34 | 167,375 | 59 | pandas | 13 | pandas/io/pytables.py | Python | 11 | {
"docstring": "validate that we have the same order as the existing & same dtype",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 11
} | https://github.com/pandas-dev/pandas.git | |
2 | load_blocks_from_repo | def load_blocks_from_repo(name, src=None, api_key=None, alias=None, **kwargs):
if src is None:
tokens = name.split(
"/"
) # Separate the source (e.g. "huggingface") from the repo name (e.g. "google/vit-base-patch16-224")
assert (
len(tokens) > 1
), "Eith... | cb2713e7050f2783493736e43a6b704865ce61c5 | 12 | external.py | 167 | Getting Interface.load() working for 2.x and 3.x models and Spaces (#1361)
* version
* refactor for model and 2.x spaces
* fixing tests
* fixed tests
* getting there...
* formatting
* formatting
* fixes
* formatting
* external dependencies working
* formatting
* loading from 3.x
* c... | 43,144 | 0 | 165 | 104 | 61 | 180,326 | 75 | gradio | 17 | gradio/external.py | Python | 15 | {
"docstring": "Creates and returns a Blocks instance from several kinds of Hugging Face repos:\n 1) A model repo\n 2) A Spaces repo running Gradio 2.x\n 3) A Spaces repo running Gradio 3.x\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 31,
"vocab_size": 24
} | https://github.com/gradio-app/gradio.git | |
4 | get_backend_for_dir | def get_backend_for_dir(self, location):
# type: (str) -> Optional[VersionControl]
vcs_backends = {}
for vcs_backend in self._registry.values():
repo_path = vcs_backend.get_repository_root(location)
if not repo_path:
continue
logger.de... | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | 10 | versioncontrol.py | 126 | upd; format | 12,562 | 0 | 257 | 75 | 67 | 61,419 | 86 | transferlearning | 16 | .venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py | Python | 13 | {
"docstring": "\n Return a VersionControl object if a repository of that type is found\n at the given directory.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 15
} | https://github.com/jindongwang/transferlearning.git | |
def _stamp_regen_task(task, visitor, **headers):
    """Stamp a single task from a regenerated generator.

    When stamping a sequence of tasks created by a generator, this lets each
    task be stamped without exhausting the generator.
    """
    task.stamp(visitor=visitor, **headers)
    return task
| 3a7a82af9588629dad5807e0862bacbbd5d7a7f2 | 8 | canvas.py | 39 | Canvas.py doc enhancement (#7889)
* Enhanced doc for canvas.maybe_unroll_group()
* Enhanced doc for canvas._stamp_regen_task()
* Enhanced doc for canvas._merge_dictionaries() | 52,267 | 0 | 17 | 24 | 8 | 208,258 | 8 | celery | 5 | celery/canvas.py | Python | 3 | {
"docstring": "When stamping a sequence of tasks created by a generator,\n we use this function to stamp each task in the generator\n without exhausting it.",
"language": "en",
"n_whitespaces": 29,
"n_words": 24,
"vocab_size": 23
} | https://github.com/celery/celery.git | |
2 | manage_matplotlib_context | def manage_matplotlib_context() -> Any:
originalRcParams = matplotlib.rcParams.copy()
# Credits for this style go to the ggplot and seaborn packages.
# We copied the style file to remove dependencies on the Seaborn package.
# Check it out, it's an awesome library for plotting
customRcParam... | 11e1a8a3fa8d13513fe926b731fb907a066af2a1 | 15 | context.py | 503 | fix: change context managed backend (#1149) | 46,847 | 0 | 662 | 273 | 139 | 191,835 | 184 | ydata-profiling | 19 | src/pandas_profiling/visualisation/context.py | Python | 62 | {
"docstring": "Return a context manager for temporarily changing matplotlib unit registries and rcParams.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | https://github.com/ydataai/ydata-profiling.git | |
1 | test_empty_backend | def test_empty_backend(self) -> None:
yaml_str =
output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
self.assertExpectedInline(output_error, )
| bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d | 8 | test_gen_backend_stubs.py | 47 | Revert "Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels" (#69950)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/69950
This reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa.
Test Plan: Imported from OSS
Reviewed By: albanD
Diff... | 21,490 | 0 | 32 | 26 | 10 | 102,175 | 11 | pytorch | 6 | tools/test/test_gen_backend_stubs.py | Python | 8 | {
"docstring": "\\\nbackend:\ncpp_namespace: torch_xla\nsupported:\n- absYou must provide a value for \"backend\"",
"language": "en",
"n_whitespaces": 8,
"n_words": 13,
"vocab_size": 13
} | https://github.com/pytorch/pytorch.git | |
3 | _run_offline_evaluation | def _run_offline_evaluation(self):
assert len(self.workers.local_worker().policy_map) == 1
parallelism = self.evaluation_config.evaluation_num_workers or 1
offline_eval_results = {"off_policy_estimator": {}}
for evaluator_name, offline_evaluator in self.reward_estimators.items(... | e368dd9b4e10026767df66d1811a92bd8ca2d8f9 | 12 | algorithm.py | 121 | [RLlib] By-pass Evaluation workers when doing OPE (#30135)
Signed-off-by: Kourosh Hakhamaneshi <kourosh@anyscale.com> | 30,913 | 0 | 150 | 74 | 26 | 136,419 | 30 | ray | 17 | rllib/algorithms/algorithm.py | Python | 12 | {
"docstring": "Runs offline evaluation via `OfflineEvaluator.estimate_on_dataset()` API.\n\n This method will be used when `evaluation_dataset` is provided.\n Note: This will only work if the policy is a single agent policy.\n\n Returns:\n The results dict from the offline evaluation ... | https://github.com/ray-project/ray.git | |
5 | configure_optimizers | def configure_optimizers(self):
# pylint: disable=assignment-from-none
arc_optimizers = self.configure_architecture_optimizers()
if arc_optimizers is None:
return self.model.configure_optimizers()
if isinstance(arc_optimizers, optim.Optimizer):
arc_optim... | 14d2966b9e91ae16dcc39de8f41017a75cec8ff9 | 11 | base_lightning.py | 211 | Valuechoice oneshot lightning (#4602) | 24,584 | 0 | 296 | 130 | 85 | 112,126 | 114 | nni | 24 | nni/retiarii/oneshot/pytorch/base_lightning.py | Python | 17 | {
"docstring": "\n Combine architecture optimizers and user's model optimizers.\n You can overwrite configure_architecture_optimizers if architecture optimizers are needed in your NAS algorithm.\n For now ``self.model`` is tested against :class:`nni.retiarii.evaluator.pytorch.lightning._Supervise... | https://github.com/microsoft/nni.git | |
def crop(clip, i, j, h, w):
    """Crop a video clip spatially.

    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W).
        i, j: top-left corner (row, column) of the crop.
        h, w: height and width of the crop.

    Raises:
        ValueError: if ``clip`` is not a 4D tensor.
    """
    if clip.ndim != 4:
        raise ValueError("clip should be a 4D tensor")
    return clip[..., i : i + h, j : j + w]
| 289fce29b3e2392114aadbe7a419df0f2e3ac1be | 10 | _functional_video.py | 74 | Replace asserts with exceptions (#5587)
* replace most asserts with exceptions
* fix formating issues
* fix linting and remove more asserts
* fix regresion
* fix regresion
* fix bug
* apply ufmt
* apply ufmt
* fix tests
* fix format
* fix None check
* fix detection models tests
* no... | 46,893 | 0 | 45 | 48 | 24 | 192,419 | 29 | vision | 9 | torchvision/transforms/_functional_video.py | Python | 4 | {
"docstring": "\n Args:\n clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 14,
"vocab_size": 13
} | https://github.com/pytorch/vision.git | |
7 | binary_crossentropy | def binary_crossentropy(target, output, from_logits=False):
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
# Use logits whenever they are available. `softmax` and `sigmoid`
# activations cache logits on the `output` Tensor.
if hasattr(output, "_keras_logits"):
... | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | @keras_export("keras.backend.binary_focal_crossentropy")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs | 14 | backend.py | 387 | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 80,219 | 1 | 421 | 222 | 121 | 269,598 | 176 | keras | 37 | keras/backend.py | Python | 31 | {
"docstring": "Binary crossentropy between an output tensor and a target tensor.\n\n Args:\n target: A tensor with the same shape as `output`.\n output: A tensor.\n from_logits: Whether `output` is expected to be a logits tensor.\n By default, we consider that `output`\n ... | https://github.com/keras-team/keras.git |
1 | print_help | def print_help(self):
help_text = f
console.print(text=help_text, menu="Custom - Quantitative Analysis")
| 6a66f3f3ed934e0615ff4ba283ee67fcc43d3656 | 9 | qa_controller.py | 54 | Custom data context (#1193)
* Add first iteration of custom context
* Add sample data + improve plot
* Change `head` to `show` with sorting and limit. Add "-x" to plot and dynamic update of completer
* generate random time series for test csv
* Make columns lower case. Check if date is in columns and con... | 83,981 | 0 | 31 | 22 | 10 | 281,704 | 10 | OpenBBTerminal | 9 | gamestonk_terminal/custom/quantitative_analysis/qa_controller.py | Python | 31 | {
"docstring": "Print help[cmds]\n load load new data file\n pick pick target column for analysis[/cmds]\n\n[param]File: [/param]{self.file}\n[param]Target Column: [/param]{self.target}\n[cmds]\n[info]Statistics:[/info]\n summary brief summary statistics of loaded stock.\n normality norm... | https://github.com/OpenBB-finance/OpenBBTerminal.git | |
2 | unescape | def unescape(s):
if '&' not in s:
return s
return _charref.sub(_replace_charref, s)
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 7 | __init__.py | 40 | add python 3.10.4 for windows | 54,879 | 0 | 28 | 23 | 11 | 217,668 | 12 | XX-Net | 5 | python3.10.4/Lib/html/__init__.py | Python | 4 | {
"docstring": "\n Convert all named and numeric character references (e.g. >, >,\n &x3e;) in the string s to the corresponding unicode characters.\n This function uses the rules defined by the HTML 5 standard\n for both valid and invalid character references, and the list of\n HTML 5 named char... | https://github.com/XX-net/XX-Net.git | |
4 | upsample_conv_2d | def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
r
assert isinstance(factor, int) and factor >= 1
# Check weight shape.
w = tf.convert_to_tensor(w)
assert w.shape.rank == 4
convH = w.shape[0].value
convW = w.shape[1].value
inC = _shape(w, 2)
ou... | 7375ee364e0df2a417f92593e09557f1b2a3575a | 16 | upfirdn_2d.py | 602 | initialize ostec | 1,607 | 0 | 317 | 387 | 110 | 9,407 | 198 | insightface | 34 | reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py | Python | 48 | {
"docstring": "Fused `upsample_2d()` followed by `tf.nn.conv2d()`.\n\n Padding is performed only once at the beginning, not between the operations.\n The fused op is considerably more efficient than performing the same calculation\n using standard TensorFlow ops. It supports gradients of arbitrary order.\n\... | https://github.com/deepinsight/insightface.git | |
1 | get_assets | def get_assets(filters):
return frappe.db.sql(
,
{"to_date": filters.to_date, "from_date": filters.from_date, "company": filters.company},
as_dict=1,
)
| 494bd9ef78313436f0424b918f200dab8fc7c20b | 10 | asset_depreciations_and_balances.py | 64 | style: format code with black | 13,808 | 0 | 7 | 39 | 13 | 65,150 | 13 | erpnext | 9 | erpnext/accounts/report/asset_depreciations_and_balances/asset_depreciations_and_balances.py | Python | 49 | {
"docstring": "\n\t\tSELECT results.asset_category,\n\t\t\t sum(results.accumulated_depreciation_as_on_from_date) as accumulated_depreciation_as_on_from_date,\n\t\t\t sum(results.depreciation_eliminated_during_the_period) as depreciation_eliminated_during_the_period,\n\t\t\t sum(results.depreciation_amount_dur... | https://github.com/frappe/erpnext.git | |
5 | _get_or_create | def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):
# Defaults
if name is None:
name = s.name
if dtype is None:
dtype = 'floatX'
if broadcastable is None:
broadcastable = ()
key = self._get_key(s, name, dtype=dtyp... | 68bd82de645a61f4bbc0b6246e70959373c9cba2 | 9 | aesaracode.py | 164 | fix(printing): change Aesara argument broadcastable to shape | 49,056 | 0 | 165 | 107 | 30 | 198,878 | 51 | sympy | 13 | sympy/printing/aesaracode.py | Python | 13 | {
"docstring": "\n Get the Aesara variable for a SymPy symbol from the cache, or create it\n if it does not exist.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 17
} | https://github.com/sympy/sympy.git | |
4 | get_downstream_powerports | def get_downstream_powerports(self, leg=None):
poweroutlets = self.poweroutlets.filter(cable__isnull=False)
if leg:
poweroutlets = poweroutlets.filter(feed_leg=leg)
if not poweroutlets:
return PowerPort.objects.none()
q = Q()
for poweroutlet in p... | fcd1daaf798d62023f999c3e09e035f7b3f47c8f | 12 | device_components.py | 132 | Update power utilization calculations for new cabling model | 78,026 | 0 | 154 | 82 | 24 | 265,204 | 31 | netbox | 16 | netbox/dcim/models/device_components.py | Python | 13 | {
"docstring": "\n Return a queryset of all PowerPorts connected via cable to a child PowerOutlet.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 12
} | https://github.com/netbox-community/netbox.git | |
2 | call_news | def call_news(self, other_args):
parser = argparse.ArgumentParser(
prog="news",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=,
)
parser.add_argument(
"-l",
"--limit",
... | e59a30b18873f7449bc59a88c3da21894e0dbe0a | 11 | dd_controller.py | 480 | Add crypto DD commands (#1710)
* Add github activity over time
* Create tests
* Update default days in chart command
* Add san package to poetry
* Fix tests failed
* Generate fixtures
* Fix tests failed
* Remove sanpy package and use requests instead
* Adjust index
* Add hugo server
* Fix... | 84,695 | 0 | 1,015 | 301 | 126 | 284,320 | 161 | OpenBBTerminal | 43 | openbb_terminal/cryptocurrency/due_diligence/dd_controller.py | Python | 83 | {
"docstring": "Process news commandDisplay most recent news on the given coin from CryptoPanic aggregator platform.\n [Source: https://cryptopanic.com/]",
"language": "en",
"n_whitespaces": 26,
"n_words": 16,
"vocab_size": 15
} | https://github.com/OpenBB-finance/OpenBBTerminal.git | |
6 | make_layoutgrids_gs | def make_layoutgrids_gs(layoutgrids, gs):
if gs in layoutgrids or gs.figure is None:
return layoutgrids
# in order to do constrained_layout there has to be at least *one*
# gridspec in the tree:
layoutgrids['hasgrids'] = True
if not hasattr(gs, '_subplot_spec'):
# normal gridsp... | c682ca40c647770a967b6b8a7615eb91c7cb3fc9 | 16 | _constrained_layout.py | 361 | FIX: better repr for subgridspecs | 22,565 | 0 | 510 | 230 | 80 | 107,046 | 134 | matplotlib | 29 | lib/matplotlib/_constrained_layout.py | Python | 33 | {
"docstring": "\n Make the layoutgrid for a gridspec (and anything nested in the gridspec)\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 12,
"vocab_size": 11
} | https://github.com/matplotlib/matplotlib.git | |
10 | data_received | def data_received(self, data):
if self._sslpipe is None:
# transport closing, sslpipe is destroyed
return
try:
ssldata, appdata = self._sslpipe.feed_ssldata(data)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException ... | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 17 | sslproto.py | 217 | add python 3.10.4 for windows | 56,109 | 0 | 488 | 130 | 55 | 220,737 | 74 | XX-Net | 21 | python3.10.4/Lib/asyncio/sslproto.py | Python | 29 | {
"docstring": "Called when some SSL data is received.\n\n The argument is a bytes object.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 13,
"vocab_size": 12
} | https://github.com/XX-net/XX-Net.git | |
3 | make_increasing_candle | def make_increasing_candle(open, high, low, close, dates, **kwargs):
increase_x, increase_y = _Candlestick(
open, high, low, close, dates, **kwargs
).get_candle_increase()
if "line" in kwargs:
kwargs.setdefault("fillcolor", kwargs["line"]["color"])
else:
kwargs.setdefault("... | 43e3a4011080911901176aab919c0ecf5046ddd3 | 12 | _candlestick.py | 237 | switch to black .22 | 57,816 | 0 | 165 | 145 | 41 | 226,141 | 52 | plotly.py | 21 | packages/python/plotly/plotly/figure_factory/_candlestick.py | Python | 23 | {
"docstring": "\n Makes boxplot trace for increasing candlesticks\n\n _make_increasing_candle() and _make_decreasing_candle separate the\n increasing traces from the decreasing traces so kwargs (such as\n color) can be passed separately to increasing or decreasing traces\n when direction is set to 'in... | https://github.com/plotly/plotly.py.git | |
4 | normalize_span_histogram_resutls | def normalize_span_histogram_resutls(span, histogram_params, results):
histogram_column = get_span_histogram_column(span, histogram_params)
bin_name = get_function_alias(histogram_column)
# zerofill and rename the columns while making sure to adjust for precision
bucket_map = {}
for row in re... | 6c49c2ff46496809d6620ac3746262c66f02142e | 13 | discover.py | 203 | ref(spans): Normalize exclusive time histogram results (#32762)
* ref(spans): Normalize exclusive time histogram results
* test normalized data | 19,394 | 0 | 184 | 124 | 71 | 97,246 | 90 | sentry | 22 | src/sentry/snuba/discover.py | Python | 15 | {
"docstring": "\n Normalizes the span histogram results by renaming the columns to key and bin\n and make sure to zerofill any missing values.\n\n :param [Span] span: The span for which you want to generate the\n histograms for.\n :param HistogramParams histogram_params: The histogram parameters u... | https://github.com/getsentry/sentry.git | |
1 | round | def round(self, decimals=0):
from dask.array.routines import round
return round(self, decimals=decimals)
| 2820bae493a49cb1d0a6e376985c5473b8f04fa8 | 8 | core.py | 42 | Don't include docs in ``Array`` methods, just refer to module docs (#9244)
Co-authored-by: James Bourbeau <jrbourbeau@users.noreply.github.com> | 36,752 | 0 | 31 | 27 | 9 | 156,742 | 10 | dask | 6 | dask/array/core.py | Python | 3 | {
"docstring": "Return array with each element rounded to the given number of decimals.\n\n Refer to :func:`dask.array.round` for full documentation.\n\n See Also\n --------\n dask.array.round : equivalent function\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 25,
"... | https://github.com/dask/dask.git | |
1 | test_encoding_latin1_118 | def test_encoding_latin1_118(self, datapath):
# GH 25960
msg =
with tm.assert_produces_warning(UnicodeWarning) as w:
encoded = read_stata(
datapath("io", "data", "stata", "stata1_encoding_118.dta")
)
assert len(w) == 151
assert w[0... | c055dc4e6be9fc1b68d873a1ace286322dadd5e1 | 13 | test_stata.py | 141 | TST: Don't use autouse fixture in test_stata (#45831) | 39,577 | 0 | 130 | 82 | 31 | 164,632 | 36 | pandas | 17 | pandas/tests/io/test_stata.py | Python | 14 | {
"docstring": "\nOne or more strings in the dta file could not be decoded using utf-8, and\nso the fallback encoding of latin-1 is being used. This can happen when a file\nhas been incorrectly encoded by Stata or some other software. You should verify\nthe string values returned are correct.",
"language": "en",
... | https://github.com/pandas-dev/pandas.git | |
1 | locator | def locator(self, loc):
self._long_axis().set_major_locator(loc)
self._locator = loc
| 6010bb43ed01c48c7c403569dd210490b236a853 | 9 | colorbar.py | 40 | MNT: make colorbars locators and formatters properties | 22,709 | 0 | 28 | 23 | 7 | 107,364 | 7 | matplotlib | 6 | lib/matplotlib/colorbar.py | Python | 3 | {
"docstring": "\n Set the major locator being used for colorbar\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | https://github.com/matplotlib/matplotlib.git | |
1 | test_removing_entity_unavailable | async def test_removing_entity_unavailable(hass):
entry = er.RegistryEntry(
entity_id="hello.world",
unique_id="test-unique-id",
platform="test-platform",
disabled_by=None,
)
ent = entity.Entity()
ent.hass = hass
ent.entity_id = "hello.world"
ent.registry_en... | 26a85c6644991f626ccce62c05665095c2577234 | 10 | test_entity.py | 178 | Add Entity.has_entity_name attribute (#73217) | 113,341 | 0 | 123 | 104 | 31 | 314,737 | 50 | core | 20 | tests/helpers/test_entity.py | Python | 19 | {
"docstring": "Test removing an entity that is still registered creates an unavailable state.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | https://github.com/home-assistant/core.git | |
30 | capacity_values | def capacity_values(self, qs=None, tasks=None, breakdown=False, graph=None):
if qs is None: # Optionally BYOQS - bring your own queryset
qs = self.all().prefetch_related('instances')
instance_ig_mapping, ig_ig_mapping = self.capacity_mapping(qs=qs)
if tasks is None:
... | 604cbc17376620dc67df35386421835d43732a4e | 18 | managers.py | 817 | Consume control capacity (#11665)
* Select control node before start task
Consume capacity on control nodes for controlling tasks and consider
remainging capacity on control nodes before selecting them.
This depends on the requirement that control and hybrid nodes should all
be in the instance group named 'con... | 17,078 | 0 | 1,412 | 503 | 129 | 80,597 | 296 | awx | 42 | awx/main/managers.py | Python | 65 | {
"docstring": "\n Returns a dictionary of capacity values for all IGs\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | https://github.com/ansible/awx.git | |
1 | test_all_no_duplicate_names | def test_all_no_duplicate_names(self, gp_mock, glob_mock):
fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures')
gp_mock.return_value = [
fixture_path,
'/path/to'
]
glob_mock.glob.side_effect = [
[os.path.join(fixture_pa... | 4260b71cc77b7a44e061668d0d408d847f550156 | 11 | test_plugins.py | 209 | refactor and fixes for doc parsing (#77719)
* refactor and remove redundant code in documentation
allow location and building api to be more accessible
fix issues with displaying ansible.legacy and ansible.builtin
ensure we don't x2 process tokens (some modules reference them also) fixes #77764
move to c... | 79,491 | 0 | 195 | 124 | 48 | 268,361 | 60 | ansible | 23 | test/units/plugins/test_plugins.py | Python | 15 | {
"docstring": "\n This test goes along with ``test__load_module_source_no_duplicate_names``\n and ensures that we ignore duplicate imports on multiple paths\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 16
} | https://github.com/ansible/ansible.git | |
1 | binary_matches | def binary_matches(y_true, y_pred, threshold=0.5):
y_pred = tf.convert_to_tensor(y_pred)
threshold = tf.cast(threshold, y_pred.dtype)
y_pred = tf.cast(y_pred > threshold, y_pred.dtype)
return tf.cast(tf.equal(y_true, y_pred), tf.int8) | 119cd4655d01570a70c70879dff4461ea46161bf | 9 | metrics_utils.py | 98 | Added util metric method for binary_matches. Decoupled from public metric binarry_acc | 79,806 | 0 | 26 | 66 | 17 | 268,987 | 21 | keras | 10 | keras/utils/metrics_utils.py | Python | 5 | {
"docstring": "Creates int Tensor, 1 for label-prediction match, 0 for mismatch.\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n threshold: (Optional) Float representing the threshold for deciding whether\n ... | https://github.com/keras-team/keras.git | |
7 | get_matrix | def get_matrix(self):
from sympy.matrices.dense import Matrix
deprecate_data()
with ignore_warnings(SymPyDeprecationWarning):
if 0 < self.rank <= 2:
rows = self.data.shape[0]
columns = self.data.shape[1] if self.rank == 2 else 1
... | cba899d4137b0b65f6850120ee42cd4fcd4f9dbf | 18 | tensor.py | 235 | Update the various tensor deprecations | 48,346 | 0 | 401 | 148 | 49 | 197,113 | 70 | sympy | 20 | sympy/tensor/tensor.py | Python | 21 | {
"docstring": "\n DEPRECATED: do not use.\n\n Returns ndarray components data as a matrix, if components data are\n available and ndarray dimension does not exceed 2.\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 23,
"vocab_size": 19
} | https://github.com/sympy/sympy.git | |
1 | test_device_stats_gpu_from_torch | def test_device_stats_gpu_from_torch(tmpdir):
model = BoringModel()
device_stats = DeviceStatsMonitor()
| b56d8677ad0ff8513e566334f4a78a24b88480c3 | 8 | test_device_stats_monitor.py | 31 | Update test_pruning.py to use `devices` instead of `gpus` or `ipus` (#11339) | 69,665 | 0 | 17 | 82 | 7 | 241,712 | 8 | lightning | 6 | tests/callbacks/test_device_stats_monitor.py | Python | 19 | {
"docstring": "Test GPU stats are logged using a logger with Pytorch >= 1.8.0.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | https://github.com/Lightning-AI/lightning.git | |
8 | expand_egg_links | def expand_egg_links(self) -> None:
prefixes = [
Path(prefix)
for prefix in self.base_paths["libdirs"].split(os.pathsep)
if vistir.path.is_in_path(prefix, self.prefix.as_posix())
]
for loc in prefixes:
if not loc.exists():
... | 4b996c0fa85824b323ad9eff3364dbe2213ebb4c | 16 | environment.py | 200 | Convert type comments to type annotations | 3,716 | 0 | 259 | 120 | 31 | 21,185 | 44 | pipenv | 26 | pipenv/environment.py | Python | 21 | {
"docstring": "\n Expand paths specified in egg-link files to prevent pip errors during\n reinstall\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 12,
"vocab_size": 12
} | https://github.com/pypa/pipenv.git | |
3 | deconstruct | def deconstruct(self):
qs_class = self._queryset_class
if getattr(self, "_built_with_as_manager", False):
# using MyQuerySet.as_manager()
return (
True, # as_manager
None, # manager_class
"%s.%s" % (qs_class.__module__, q... | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 15 | manager.py | 192 | Refs #33476 -- Reformatted code with Black. | 51,170 | 0 | 511 | 115 | 73 | 205,712 | 107 | django | 15 | django/db/models/manager.py | Python | 28 | {
"docstring": "\n Return a 5-tuple of the form (as_manager (True), manager_class,\n queryset_class, args, kwargs).\n\n Raise a ValueError if the manager is dynamically generated.\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 21,
"vocab_size": 19
} | https://github.com/django/django.git | |
1 | right | def right(self):
from pandas import Index
return Index(self._right, copy=False)
| 62a69beddbedde349891378992c902c0b9341a9f | 8 | interval.py | 36 | DOC: Add numpydoc SS06 validation (#47885) | 40,192 | 0 | 30 | 21 | 9 | 168,085 | 9 | pandas | 6 | pandas/core/arrays/interval.py | Python | 3 | {
"docstring": "\n Return the right endpoints of each Interval in the IntervalArray as an Index.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 12
} | https://github.com/pandas-dev/pandas.git | |
1 | get_requires_for_build_sdist | def get_requires_for_build_sdist(self, config_settings=None):
return self._call_hook('get_requires_for_build_sdist', {
'config_settings': config_settings
})
| f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | 10 | wrappers.py | 41 | upd; format | 13,098 | 0 | 41 | 23 | 9 | 63,010 | 9 | transferlearning | 4 | .venv/lib/python3.8/site-packages/pip/_vendor/pep517/wrappers.py | Python | 4 | {
"docstring": "Identify packages required for building a wheel\n\n Returns a list of dependency specifications, e.g.::\n\n [\"setuptools >= 26\"]\n\n This does not include requirements specified in pyproject.toml.\n It returns the result of calling the equivalently named hook in a\n ... | https://github.com/jindongwang/transferlearning.git | |
2 | as_directory | def as_directory(self) -> Iterator[str]:
if self._local_path:
yield self._local_path
else:
temp_dir = self.to_directory()
yield temp_dir
shutil.rmtree(temp_dir, ignore_errors=True)
| d96ac251d7c9d12fadedfdfd903dc393f5bae217 | 11 | checkpoint.py | 70 | [air] Add `Checkpoint.as_directory()` for efficient checkpoint fs processing (#23908)
This PR adds a `Checkpoint_as_directory()` context manager that either returns the local path (if checkpoint is already a directory) or a temporary directory path containing the checkpoint data, which is cleaned up after use. The pat... | 34,158 | 0 | 81 | 41 | 14 | 148,044 | 16 | ray | 10 | python/ray/ml/checkpoint.py | Python | 29 | {
"docstring": "Return checkpoint directory path in a context.\n\n This function makes checkpoint data available as a directory while avoiding\n unnecessary copies and left-over temporary data.\n\n If the checkpoint is already a directory checkpoint, it will return\n the existing path. If ... | https://github.com/ray-project/ray.git | |
11 | update_billed_amount_based_on_so | def update_billed_amount_based_on_so(so_detail, update_modified=True):
from frappe.query_builder.functions import Sum
# Billed against Sales Order directly
si = frappe.qb.DocType("Sales Invoice").as_("si")
si_item = frappe.qb.DocType("Sales Invoice Item").as_("si_item")
sum_amount = Sum(si_item.amount).as_("amoun... | ce0b84f54d495fc78a6792a9b05d0eb1dc799ed2 | 19 | delivery_note.py | 708 | refactor: use frappe.qb instead of sql
(cherry picked from commit 0a9ec9f591f8b4d0e630a3c902b69c9996f080dd) | 13,585 | 0 | 162 | 440 | 120 | 64,242 | 214 | erpnext | 45 | erpnext/stock/doctype/delivery_note/delivery_note.py | Python | 48 | {
"docstring": "select sum(amount) from `tabSales Invoice Item`\n\t\t\t\twhere dn_detail=%s and docstatus=1",
"language": "en",
"n_whitespaces": 8,
"n_words": 10,
"vocab_size": 10
} | https://github.com/frappe/erpnext.git | |
2 | send | async def send(self, data) -> bool:
try:
await asyncio.wait_for(
self.queue.put(data),
timeout=self.drain_timeout
)
return True
except asyncio.TimeoutError:
return False
| 2b6d00dde449934db8789c860d5e0e9dc9c528ab | 13 | channel.py | 68 | initial channel api change | 35,044 | 0 | 113 | 41 | 17 | 151,551 | 18 | freqtrade | 11 | freqtrade/rpc/api_server/ws/channel.py | Python | 13 | {
"docstring": "\n Add the data to the queue to be sent.\n :returns: True if data added to queue, False otherwise\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 14
} | https://github.com/freqtrade/freqtrade.git | |
1 | test_second_get_event_cancelled | def test_second_get_event_cancelled(self):
with self.blocking_get_event_calls() as (unblock, get_event1, get_event2):
# Cancel the second `get_event` call.
get_event2.cancel()
# The first `get_event` call must not be cancelled.
self.assertNoResult(get_eve... | 8a87b4435a736cd42454cad7e57b65ec911f01fa | 11 | test_events_worker.py | 112 | Handle cancellation in `EventsWorkerStore._get_events_from_cache_or_db` (#12529)
Multiple calls to `EventsWorkerStore._get_events_from_cache_or_db` can
reuse the same database fetch, which is initiated by the first call.
Ensure that cancelling the first call doesn't cancel the other calls
sharing the same database ... | 72,077 | 0 | 189 | 64 | 40 | 248,060 | 54 | synapse | 15 | tests/storage/databases/main/test_events_worker.py | Python | 8 | {
"docstring": "Test cancellation of the second `get_event` call sharing a database fetch.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | https://github.com/matrix-org/synapse.git | |
4 | remove_column | def remove_column(self, i, *args, **kwargs):
table = self.table.remove_column(i, *args, **kwargs)
name = self.table.column_names[i]
blocks = []
for tables in self.blocks:
blocks.append(
[
t.remove_column(t.column_names.index(name),... | e35be138148333078284b942ccc9ed7b1d826f97 | 16 | table.py | 145 | Update docs to new frontend/UI (#3690)
* WIP: update docs to new UI
* make style
* Rm unused
* inject_arrow_table_documentation __annotations__
* hasattr(arrow_table_method, "__annotations__")
* Update task_template.rst
* Codeblock PT-TF-SPLIT
* Convert loading scripts
* Convert docs to mdx
... | 21,852 | 0 | 172 | 96 | 29 | 104,416 | 40 | datasets | 14 | src/datasets/table.py | Python | 12 | {
"docstring": "\n Create new Table with the indicated column removed.\n\n Args:\n i (:obj:`int`):\n Index of column to remove.\n\n Returns:\n :class:`datasets.table.Table`:\n New table without the column.\n ",
"language": "en",
"n_wh... | https://github.com/huggingface/datasets.git | |
1 | test_upload_room_keys_bogus_version | def test_upload_room_keys_bogus_version(self) -> None:
version = self.get_success(
self.handler.create_version(
self.local_user,
{
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
... | 652d1669c5a103b1c20478770c4aaf18849c09a3 | 13 | test_e2e_room_keys.py | 139 | Add missing type hints to tests.handlers. (#14680)
And do not allow untyped defs in tests.handlers. | 73,360 | 0 | 215 | 84 | 28 | 250,282 | 32 | synapse | 16 | tests/handlers/test_e2e_room_keys.py | Python | 20 | {
"docstring": "Check that we get a 404 on uploading keys when an nonexistent version\n is specified\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 15,
"vocab_size": 15
} | https://github.com/matrix-org/synapse.git | |
1 | test_white_levels_to_color_temperature | def test_white_levels_to_color_temperature():
# Only cold channel enabled -> coldest color temperature
assert color_util._white_levels_to_color_temperature(255, 0, 2000, 6535) == (
6535,
255,
)
assert color_util._white_levels_to_color_temperature(128, 0, 2000, 6535) == (
653... | 47d0598e75487f63901931875f69f802a477df13 | 8 | test_color.py | 197 | Use Kelvin as the preferred color temperature unit (#79591)
* Use Kelvin as the preferred white temperature unit
* Update homekit
* Adjust tests | 87,759 | 0 | 251 | 145 | 36 | 288,603 | 99 | core | 3 | tests/util/test_color.py | Python | 29 | {
"docstring": "Test warm, cold conversion to color temp.\n\n Temperature values must be in mireds\n Home Assistant uses rgbcw for rgbww\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 19,
"vocab_size": 19
} | https://github.com/home-assistant/core.git | |
1 | _write_str_avoiding_backslashes | def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES):
string, quote_types = self._str_literal_helper(string, quote_types=quote_types)
quote_type = quote_types[0]
self.write(f"{quote_type}{string}{quote_type}")
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 9 | ast.py | 77 | add python 3.10.4 for windows | 55,932 | 0 | 42 | 41 | 12 | 220,194 | 14 | XX-Net | 8 | python3.10.4/Lib/ast.py | Python | 4 | {
"docstring": "Write string literal value with a best effort attempt to avoid backslashes.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | https://github.com/XX-net/XX-Net.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.