| repo (string) | pull_number (string) | instance_id (string) | issue_numbers (list) | base_commit (string) | patch (string) | test_patch (string) | problem_statement (string) | hints_text (string) | all_hints_text (string) | commit_urls (list) | created_at (string) | commit_url (string) | rebuild_cmds (list) | test_cmds (list) | print_cmds (list) | log_parser (string) | FAIL_TO_PASS (list) | PASS_TO_PASS (list) | docker_image (string) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
samtools/samtools
|
2235
|
samtools__samtools-2235
|
[
"1563"
] |
abaa1583614a12cb446e8927b92723fdfde2025c
|
diff --git a/coverage.c b/coverage.c
index 92f497dc8..7fcb34f7e 100644
--- a/coverage.c
+++ b/coverage.c
@@ -1,7 +1,7 @@
/* coverage.c -- samtools coverage subcommand
Copyright (C) 2018,2019 Florian Breitwieser
- Portions copyright (C) 2019-2021, 2023-2024 Genome Research Ltd.
+ Portions copyright (C) 2019-2021, 2023-2025 Genome Research Ltd.
Author: Florian P Breitwieser <[email protected]>
@@ -119,6 +119,8 @@ static int usage(void) {
" -d, --depth INT maximum allowed coverage depth [1000000].\n"
" If 0, depth is set to the maximum integer value,\n"
" effectively removing any depth limit.\n"
+ " --min-depth INT minimum coverage depth below which a position \n"
+ " to be ignored [1]\n"
"Output options:\n"
" -m, --histogram show histogram instead of tabular output\n"
" -D, --plot-depth plot depth instead of tabular output\n"
@@ -317,7 +319,7 @@ int main_coverage(int argc, char *argv[]) {
int fail_flags = (BAM_FUNMAP | BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP); // Default fail flags
int required_flags = 0;
int print_value_warning = 0;
-
+ int mindepth = 1; //min depth in file, below which pos is ignored
int *n_plp = NULL;
sam_hdr_t *h = NULL; // BAM header of the 1st input
@@ -351,6 +353,7 @@ int main_coverage(int argc, char *argv[]) {
{"region", required_argument, NULL, 'r'},
{"help", no_argument, NULL, 'h'},
{"depth", required_argument, NULL, 'd'},
+ {"min-depth", required_argument, NULL, 3},
{ NULL, 0, NULL, 0 }
};
@@ -367,6 +370,11 @@ int main_coverage(int argc, char *argv[]) {
if ((fail_flags = bam_str2flag(optarg)) < 0) {
fprintf(stderr,"Could not parse --ff %s\n", optarg); return EXIT_FAILURE;
}; break;
+ case 3: //min-depth
+ if ((i = atoi(optarg)) > 0) {
+ mindepth = i;
+ }
+ break;
case 'o': opt_output_file = optarg; opt_full_width = false; break;
case 'l': opt_min_len = atoi(optarg); break;
case 'q': opt_min_mapQ = atoi(optarg); break;
@@ -607,6 +615,7 @@ int main_coverage(int argc, char *argv[]) {
}
bool count_base = false;
+ unsigned long long summed_baseQ = 0, quality_bases = 0, depth = 0;
for (i = 0; i < n_bam_files; ++i) { // base level filters have to go here
int depth_at_pos = n_plp[i];
for (j = 0; j < n_plp[i]; ++j) {
@@ -618,8 +627,8 @@ int main_coverage(int argc, char *argv[]) {
if (bam_get_qual(p->b)[p->qpos] < opt_min_baseQ) {
--depth_at_pos; // low base quality
} else {
- stats[tid].summed_baseQ += bam_get_qual(p->b)[p->qpos];
- stats[tid].quality_bases++;
+ summed_baseQ += bam_get_qual(p->b)[p->qpos];
+ ++quality_bases;
}
} else {
print_value_warning = 1; // no quality at position
@@ -628,14 +637,18 @@ int main_coverage(int argc, char *argv[]) {
if (depth_at_pos > 0) {
count_base = true;
- stats[tid].summed_coverage += depth_at_pos;
+ depth += depth_at_pos;
}
if(current_bin < n_bins && opt_plot_coverage) {
hist[current_bin] += depth_at_pos;
}
}
- if (count_base) {
+ if (count_base && depth >= mindepth) {
+ stats[tid].summed_coverage += depth;
+ stats[tid].summed_baseQ += summed_baseQ;
+ stats[tid].quality_bases += quality_bases;
+
stats[tid].n_covered_bases++;
if (opt_print_histogram && current_bin < n_bins && !opt_plot_coverage)
++(hist[current_bin]); // Histogram based on breadth of coverage
diff --git a/doc/samtools-coverage.1 b/doc/samtools-coverage.1
index 41cfc80b6..55b16a1cc 100644
--- a/doc/samtools-coverage.1
+++ b/doc/samtools-coverage.1
@@ -3,7 +3,7 @@
.SH NAME
samtools coverage \- produces a histogram or table of coverage per chromosome
.\"
-.\" Copyright (C) 2019, 2021, 2023 Genome Research Ltd.
+.\" Copyright (C) 2019, 2021, 2023, 2025 Genome Research Ltd.
.\"
.\" Author: James Bonfield <[email protected]>
.\"
@@ -95,6 +95,9 @@ Filter flags: skip reads with mask bits set
.BI -d,\ --depth \ INT
Maximum allowed coverage depth [1000000]. If 0, depth is set to the maximum
integer value effectively removing any depth limit.
+.TP
+.BI --min-depth\ INT
+Minimum coverage depth, below which a position is ignored [1]
.PP
Output options:
|
diff --git a/test/coverage/1.expected b/test/coverage/1.expected
new file mode 100644
index 000000000..35cd07c2a
--- /dev/null
+++ b/test/coverage/1.expected
@@ -0,0 +1,3 @@
+#rname startpos endpos numreads covbases coverage meandepth meanbaseq meanmapq
+T1 1 40 6 19 47.5 0.725 7.62 36.7
+T2 1 40 6 8 20 0.4 7.12 49
diff --git a/test/coverage/2.expected b/test/coverage/2.expected
new file mode 100644
index 000000000..b4b909dc3
--- /dev/null
+++ b/test/coverage/2.expected
@@ -0,0 +1,3 @@
+#rname startpos endpos numreads covbases coverage meandepth meanbaseq meanmapq
+T1 1 40 6 10 25 0.5 7.5 36.7
+T2 1 40 6 6 15 0.35 7.43 49
diff --git a/test/coverage/3.expected b/test/coverage/3.expected
new file mode 100644
index 000000000..4165972a6
--- /dev/null
+++ b/test/coverage/3.expected
@@ -0,0 +1,3 @@
+#rname startpos endpos numreads covbases coverage meandepth meanbaseq meanmapq
+T2 1 40 6 1 2.5 0.05 8 49
+T1 1 40 0 0 0 0 0 0
diff --git a/test/coverage/4.expected b/test/coverage/4.expected
new file mode 100644
index 000000000..cd66a4283
--- /dev/null
+++ b/test/coverage/4.expected
@@ -0,0 +1,3 @@
+#rname startpos endpos numreads covbases coverage meandepth meanbaseq meanmapq
+T1 1 40 11 19 47.5 1.3 7.46 36.8
+T2 1 40 12 8 20 0.8 7.12 49
diff --git a/test/coverage/5.expected b/test/coverage/5.expected
new file mode 100644
index 000000000..adf91a007
--- /dev/null
+++ b/test/coverage/5.expected
@@ -0,0 +1,3 @@
+#rname startpos endpos numreads covbases coverage meandepth meanbaseq meanmapq
+T1 1 40 11 4 10 0.4 5.25 36.8
+T2 1 40 12 6 15 0.7 7.43 49
diff --git a/test/dat/sample.sam b/test/dat/sample.sam
new file mode 100644
index 000000000..0eaa44af7
--- /dev/null
+++ b/test/dat/sample.sam
@@ -0,0 +1,30 @@
+@HD VN:1.17 SO:coordinate
+@SQ SN:T1 LN:40
+@SQ SN:T2 LN:40
+@CO @SQ SN* LN* AH AN AS DS M5 SP TP UR
+@CO @RG ID* BC CN DS DT FO KS LB PG PI PL PM PU SM
+@CO @PG ID* PN CL PP DS VN
+@CO this is a dummy alignment file to demonstrate different abilities of hts apis
+@CO QNAME FLAG RNAME POS MAPQ CIGAR RNEXT PNEXT TLEN SEQ QUAL [TAG:TYPE:VALUE]…
+@CO 1234567890123456789012345678901234567890
+@CO AAAAACTGAAAACCCCTTTTGGGGACTGTTAACAGTTTTT T1
+@CO TTTTCCCCACTGAAAACCCCTTTTGGGGACTGTTAACAGT T2
+@CO ITR1-ITR2M, ITR2-ITR2M are proper pairs in T1 and T2, UNMP1 is partly mapped and pair is unmapped, UNMP2 & 3 are unmapped
+@CO A1-A2, A4-A3 are proper pairs with A4-A3 in different read order. A5 is secondary alignment
+@PG ID:samtools PN:samtools VN:1.22-6-g3036eb9 CL:./samtools sort -o /tmp/s.sam ../htslib/samples/sample.sam
+ITR1 99 T1 5 40 4M = 33 10 ACTG ()()
+UNMP1 73 T1 21 40 3M * 0 5 GGG &&1
+A1 99 T1 25 35 6M = 31 8 ACTGTT ******
+A5 355 T1 25 55 4M = 33 5 ACTG PPPP
+B1 99 T1 25 35 6M = 31 8 GCTATT ******
+B5 355 T1 25 35 4M = 33 5 AGTG PPPP
+A2 147 T1 31 33 6M = 25 -8 ACTGTT ()()()
+ITR1M 147 T1 33 37 4M = 5 -10 ACTG $$$$
+A4 99 T2 12 50 3M = 23 5 GAA ()(
+B4 99 T2 12 50 3M = 23 5 GAT ()(
+ITR2 147 T2 23 49 2M = 35 -10 TT **
+A3 147 T2 23 47 2M1X = 12 -5 TTG (((
+B3 147 T2 23 47 2M1X = 12 -5 TAG (((
+ITR2M 99 T2 35 51 2M = 23 10 AA &&
+UNMP2 141 * 0 0 * * 0 7 AA &&
+UNMP3 77 * 0 0 * * 0 5 GGG &&2
diff --git a/test/test.pl b/test/test.pl
index d183c7e9d..eb06ec99c 100755
--- a/test/test.pl
+++ b/test/test.pl
@@ -79,6 +79,7 @@
test_reset($opts);
test_checksum($opts);
test_checksum($opts, threads=>2);
+test_coverage($opts);
print "\nNumber of tests:\n";
printf " total .. %d\n", $$opts{nok}+$$opts{nfailed}+$$opts{nxfail}+$$opts{nxpass};
@@ -3984,3 +3985,22 @@ sub test_checksum
}
test_cmd($opts, out=>"checksum/chk2.2.expected", cmd=>"$$opts{bin}/samtools $chk -m $$opts{path}/checksum/chk2-*.tmp.chk | sed 's/\\(# Checksum[^:]*:\\).*/\\1/'");
}
+
+
+sub test_coverage
+{
+ my ($opts, %args) = @_;
+
+ #basic / existing
+ test_cmd($opts, out=>"coverage/1.expected", cmd=>"$$opts{bin}/samtools coverage $$opts{path}/dat/sample.sam");
+ #coverage --min-depth 1
+ test_cmd($opts, out=>"coverage/1.expected", cmd=>"$$opts{bin}/samtools coverage --min-depth 1 $$opts{path}/dat/sample.sam");
+ #coverage --min-depth 2
+ test_cmd($opts, out=>"coverage/2.expected", cmd=>"$$opts{bin}/samtools coverage --min-depth 2 $$opts{path}/dat/sample.sam");
+ #coverage --min-depth 2 -Q 8 -q 45
+ test_cmd($opts, out=>"coverage/3.expected", cmd=>"$$opts{bin}/samtools coverage --min-depth 2 -Q 8 -q 45 $$opts{path}/dat/sample.sam");
+ #shows coverage is based on all inputs
+ cmd("cat '$$opts{path}/dat/sample.sam' | sed '/A1/d' > $$opts{tmp}/sample1.sam");
+ test_cmd($opts, out=>"coverage/4.expected", cmd=>"$$opts{bin}/samtools coverage --min-depth 1 $$opts{path}/dat/sample.sam $$opts{tmp}/sample1.sam");
+ test_cmd($opts, out=>"coverage/5.expected", cmd=>"$$opts{bin}/samtools coverage --min-depth 4 $$opts{path}/dat/sample.sam $$opts{tmp}/sample1.sam");
+}
|
Minimum depth threshold for "samtools coverage"?
#### Is your feature request related to a problem? Please specify.
I'd like a simple way to calculate the coverage stats for a bam file. The closest out-of-the-box thing I can find is `samtools coverage`. I run the command like so:
```
samtools coverage input.bam
```
The simple summary stats are great, and what I need, _except_ for the fact that the 'covbases' statistic is defined as 'Number of covered bases with depth >= 1'. Instead, I would prefer to have the statistic of 'Number of covered bases with depth >= X', where X is a minimum depth defined by a command line flag.
#### Describe the solution you would like.
I would like to be able to use a command like:
```
samtools coverage --min-depth 20 input.bam
```
With the output statistics being calculated (in this case) only for bases covered with depth >=20. Would this be possible? So far I use a different command like:
```
bedtools genomecov -ibam input.bam -d | awk '$3 >= 20 { count++ } END { print (count/NR)*100 }'
```
But I'd prefer to have the much simpler `samtools` command and simplify the dependencies of my pipeline.
Would this be possible? Thanks!
|
Looks like a good idea, we'll consider it.
Hi Did you implement it in Samtools?
It doesn't look like it, but this line is the thing you could manually tweak in a local copy:
https://github.com/samtools/samtools/blob/develop/coverage.c#L603
If changed to `>=10` for example it'd need a minimum of 10 deep.
However, looking at this code I'm confused as to quite what it's trying to do. It looks like it takes multiple BAMs and counts coverage as any BAM with depth at least 1. So if we modified it to say any BAM with depth at least 10, then is that what you want? What if we had 3 BAMs all of depth 7, summing to 21 but individually less than a min-depth of 10?
I'm assuming it would be a sum operation, given it can't report per-BAM stats, but it's a bit tricky to follow the logic here.
|
Looks like a good idea, we'll consider it.
Hi Did you implement it in Samtools?
It doesn't look like it, but this line is the thing you could manually tweak in a local copy:
https://github.com/samtools/samtools/blob/develop/coverage.c#L603
If changed to `>=10` for example it'd need a minimum of 10 deep.
However, looking at this code I'm confused as to quite what it's trying to do. It looks like it takes multiple BAMs and counts coverage as any BAM with depth at least 1. So if we modified it to say any BAM with depth at least 10, then is that what you want? What if we had 3 BAMs all of depth 7, summing to 21 but individually less than a min-depth of 10?
I'm assuming it would be a sum operation, given it can't report per-BAM stats, but it's a bit tricky to follow the logic here.
|
[
"https://github.com/samtools/samtools/commit/3a769cfa9a71df943b00068566a171269e2789d3",
"https://github.com/samtools/samtools/commit/6a807af098c28ef534b684aaa2b305e5776145d2"
] |
2025-07-10T19:03:03Z
|
https://github.com/samtools/samtools/tree/abaa1583614a12cb446e8927b92723fdfde2025c
|
[
"autoheader && autoconf -Wno-syntax ; ./configure --with-htslib=htslib ; make"
] |
[
"make test V=1 2>&1 | tee test-output.log"
] |
[
"cat test-output.log"
] |
def parser(log: str) -> dict[str, str]:
import re
results = {}
# Match lines like: test_name: <command> .. ok / .. failed
test_re = re.compile(r'^(?P<name>[^\s].*?):\s*$')
status_re = re.compile(r'^\.\. (ok|failed)', re.IGNORECASE)
current_test = None
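    # Track the most recent test header; its ".. ok" / ".. failed" status line follows later in the log.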
for line in log.splitlines():
m = test_re.match(line)
if m:
current_test = m.group('name').strip()
continue
if current_test:
sm = status_re.match(line.strip())
if sm:
status_word = sm.group(1).lower()
if status_word == 'ok':
results[current_test] = 'pass'
elif status_word == 'failed':
results[current_test] = 'fail'
current_test = None
return results
|
[
"test_coverage"
] |
[
"test_large_positions",
"test_merge",
"test_collate",
"test_reference",
"test_split",
"test_quickcheck",
"test_markdup",
"test_bedcov",
"test_reheader",
"test_addrprg",
"test_sort",
"test_dict",
"test_head",
"test_fixmate",
"test_stats",
"test_idxstat",
"test_ampliconclip",
"test_import",
"test_checksum",
"test_bam2fq",
"test_mpileup",
"test_reset",
"test_usage_subcommand",
"test_index",
"test_ampliconstats",
"test_usage"
] |
starryzhang/sweb.eval.x86_64.samtools_1776_samtools-2235
|
micropython/micropython
|
17683
|
micropython__micropython-17683
|
[
"17657"
] |
554f114f181ee942ee3c74e44cef653604abbaef
|
diff --git a/ports/webassembly/objpyproxy.js b/ports/webassembly/objpyproxy.js
index 0eafd0dec53de..64703d78a5589 100644
--- a/ports/webassembly/objpyproxy.js
+++ b/ports/webassembly/objpyproxy.js
@@ -165,34 +165,35 @@ const py_proxy_handler = {
if (prop === "_ref") {
return target._ref;
}
- if (prop === "then") {
- return null;
- }
- if (prop === Symbol.iterator) {
- // Get the Python object iterator, and return a JavaScript generator.
- const iter_ref = Module.ccall(
- "proxy_c_to_js_get_iter",
- "number",
- ["number"],
- [target._ref],
- );
- return function* () {
- const value = Module._malloc(3 * 4);
- while (true) {
- const valid = Module.ccall(
- "proxy_c_to_js_iternext",
- "number",
- ["number", "pointer"],
- [iter_ref, value],
- );
- if (!valid) {
- break;
+ // ignore both then and all symbols but Symbol.iterator
+ if (prop === "then" || typeof prop !== "string") {
+ if (prop === Symbol.iterator) {
+ // Get the Python object iterator, and return a JavaScript generator.
+ const iter_ref = Module.ccall(
+ "proxy_c_to_js_get_iter",
+ "number",
+ ["number"],
+ [target._ref],
+ );
+ return function* () {
+ const value = Module._malloc(3 * 4);
+ while (true) {
+ const valid = Module.ccall(
+ "proxy_c_to_js_iternext",
+ "number",
+ ["number", "pointer"],
+ [iter_ref, value],
+ );
+ if (!valid) {
+ break;
+ }
+ yield proxy_convert_mp_to_js_obj_jsside(value);
}
- yield proxy_convert_mp_to_js_obj_jsside(value);
- }
- Module._free(value);
- };
+ Module._free(value);
+ };
+ }
+ return undefined;
}
const value = Module._malloc(3 * 4);
|
diff --git a/tests/ports/webassembly/py_proxy_get.mjs b/tests/ports/webassembly/py_proxy_get.mjs
new file mode 100644
index 0000000000000..825de7cabeb78
--- /dev/null
+++ b/tests/ports/webassembly/py_proxy_get.mjs
@@ -0,0 +1,14 @@
+// Test `<py-obj> get <attr>` on the JavaScript side, which tests PyProxy.get.
+
+const mp = await (await import(process.argv[2])).loadMicroPython();
+
+mp.runPython(`
+x = {"a": 1}
+`);
+
+const x = mp.globals.get("x");
+console.log(x.a === 1);
+console.log(x.b === undefined);
+console.log(typeof x[Symbol.iterator] === "function");
+console.log(x[Symbol.toStringTag] === undefined);
+console.log(x.then === undefined);
diff --git a/tests/ports/webassembly/py_proxy_get.mjs.exp b/tests/ports/webassembly/py_proxy_get.mjs.exp
new file mode 100644
index 0000000000000..36c7afad66a16
--- /dev/null
+++ b/tests/ports/webassembly/py_proxy_get.mjs.exp
@@ -0,0 +1,5 @@
+true
+true
+true
+true
+true
|
webassembly: getting JS symbols implicitly also throws errors
### Port, board and/or hardware
webassembly
### MicroPython version
MicroPython webassembly latest from *npm*
### Reproduction
In https://github.com/micropython/micropython/pull/17604 we solved the `in` operation check, but if any library tries to read the stringified version of a reference we have the same issue via the `get` proxy trap.
```js
function test(value) {
return Object.prototype.toString.call(value);
}
```
```python
import js
js.test({})
```
Result:
```
RuntimeError: Aborted(Assertion failed: stringToUTF8Array expects a string (got symbol))
```
### Expected behaviour
We covered `Symbol.iterator`; we should also cover `Symbol.toStringTag`, as that's implicitly retrieved when `Object.prototype.toString.call(ref)` happens, which is a very common way to retrieve the *kind* of a reference, as it returns:
* `[object Object]`
* `[object Array]`
* `[object Null]`
* `[object Function]`
* `[object Anything]`
This practice is particularly useful when unknown kinds are around and a serializer needs to guess how to handle them, and it's currently breaking a specific PyScript use case which is also pretty common in constrained IoT environments.
### Observed behaviour
An error is thrown every single time.
### Additional Information
No, I've provided everything above.
### Code of Conduct
Yes, I agree
|
Thanks @dpgeorge, any chance this can make it to npm too?
|
[
"https://github.com/micropython/micropython/commit/c72a3e528d7909c212596b52de5f9a5fe0161f17"
] |
2025-07-15T08:45:08Z
|
https://github.com/micropython/micropython/tree/554f114f181ee942ee3c74e44cef653604abbaef
|
[
"make -C mpy-cross ; make -C ports/unix"
] |
[
"make -C ports/unix test 2>&1 | tee test-output.log"
] |
[
"cat test-output.log"
] |
def parser(log: str) -> dict[str, str]:
import re
results: dict[str, str] = {}
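    # Each result line starts with "pass", "FAIL" or "skip" followed by the test path, e.g. "pass basics/int1.py".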
for line in log.splitlines():
m = re.match(r'(pass|FAIL|skip)\s+(\S+)', line)
if m:
status_word, test_name = m.groups()
status_word = status_word.lower()
if status_word == "pass":
results[test_name] = "pass"
elif status_word == "fail":
results[test_name] = "fail"
elif status_word == "skip":
results[test_name] = "skip"
return results
|
[
"micropython/heapalloc_fail_dict.py",
"micropython/heapalloc_int_from_bytes.py",
"float/float_divmod.py",
"extmod/json_load.py",
"extmod/re1.py",
"float/cmath_dunder.py",
"extmod/vfs_userfs.py",
"float/builtin_float_minmax.py",
"float/lexer.py",
"basics/struct1.py",
"basics/try_else.py",
"micropython/viper_binop_arith.py"
] |
[
"basics/memoryview1.py",
"basics/int_big_rshift.py",
"io/file_stdio.py",
"extmod/re_sub.py",
"basics/bytearray_construct_endian.py",
"basics/dict_fromkeys.py",
"micropython/viper_binop_arith_uint.py",
"misc/non_compliant_lexer.py",
"basics/int_big_add.py",
"io/file_stdio2.py",
"basics/class_dict.py",
"basics/class3.py",
"basics/gen_yield_from_executing.py",
"basics/seq_unpack.py",
"basics/builtin_hex_intbig.py",
"basics/closure_manyvars.py",
"extmod/random_seed_default.py",
"basics/generator_close.py",
"extmod/re_error.py",
"basics/int_divmod.py",
"basics/string_escape_invalid.py",
"basics/list_sort.py",
"basics/int_big_unary.py",
"basics/builtin_str_hex.py",
"basics/class_instance_override.py",
"basics/int_big_to_small.py",
"basics/builtin_eval.py",
"basics/builtin_ord.py",
"basics/generator_name.py",
"basics/string_strip.py",
"basics/special_comparisons2.py",
"basics/int_big_mul.py",
"extmod/json_dumps_float.py",
"basics/list_clear.py",
"basics/bytes_construct_intbig.py",
"basics/decorator.py",
"basics/while1.py",
"basics/string_large.py",
"basics/async_with.py",
"extmod/time_time_ns.py",
"basics/set_iter.py",
"basics/gen_yield_from_pending.py",
"basics/subclass_native4.py",
"extmod/cryptolib_aes256_ecb.py",
"basics/return1.py",
"basics/int_small.py",
"extmod/asyncio_wait_for_fwd.py",
"basics/bytearray_slice_assign.py",
"micropython/ringio_async.py",
"extmod/framebuf_palette.py",
"extmod/vfs_fat_fileio2.py",
"extmod/btree_error.py",
"basics/set_clear.py",
"basics/fun_name.py",
"extmod/ticks_diff.py",
"basics/int_big_xor3.py",
"basics/try_finally_return.py",
"basics/array_construct2.py",
"unicode/unicode_id.py",
"cmdline/cmd_showbc.py",
"micropython/import_mpy_invalid.py",
"basics/builtin_hash_intbig.py",
"extmod/json_loads_bytes.py",
"cmdline/repl_emacs_keys.py",
"import/import_star.py",
"micropython/viper_unop.py",
"micropython/heapalloc_str.py",
"basics/gen_yield_from_send.py",
"micropython/viper_with.py",
"misc/sys_atexit.py",
"basics/floordivide_intbig.py",
"basics/stopiteration.py",
"extmod/hashlib_sha256.py",
"extmod/vfs_fat_finaliser.py",
"micropython/heapalloc_exc_compressed_emg_exc.py",
"micropython/heapalloc_bytesio2.py",
"basics/for_break.py",
"import/import_pkg8.py",
"basics/string_compare.py",
"basics/dict_clear.py",
"io/open_append.py",
"extmod/vfs_lfs.py",
"micropython/native_const.py",
"basics/slots_bool_len.py",
"basics/int_big_mod.py",
"basics/string_fstring_invalid.py",
"basics/closure_namedarg.py",
"io/builtin_print_file.py",
"basics/class_binop.py",
"basics/del_global.py",
"basics/bytearray_count.py",
"extmod/re_split_empty.py",
"basics/string_format_cp310.py",
"micropython/heapalloc_iter.py",
"basics/lambda_defargs.py",
"basics/frozenset_add.py",
"micropython/heap_lock.py",
"import/import_pkg5.py",
"basics/set_union.py",
"basics/int_big_lshift.py",
"basics/io_write_ext.py",
"basics/builtin_ellipsis.py",
"basics/fun_calldblstar.py",
"float/math_constants.py",
"basics/continue.py",
"extmod/cryptolib_aes128_ecb.py",
"basics/bytearray_decode.py",
"extmod/tls_noleak.py",
"basics/try_as_var.py",
"basics/ordereddict_eq.py",
"extmod/hashlib_final.py",
"basics/io_bytesio_ext2.py",
"basics/string_format_intbig.py",
"stress/dict_copy.py",
"basics/dict_specialmeth.py",
"basics/compare_multi.py",
"basics/int_big1.py",
"basics/int_parse.py",
"basics/subclass_native_call.py",
"basics/string_cr_conversion.py",
"micropython/viper_ptr32_store.py",
"basics/fun_globals.py",
"basics/globals_del.py",
"micropython/viper_subscr_multi.py",
"extmod/uctypes_sizeof_native.py",
"basics/set_type.py",
"basics/special_methods.py",
"basics/string1.py",
"basics/exceptpoly2.py",
"basics/special_methods2.py",
"import/gen_context.py",
"micropython/viper_addr.py",
"basics/gen_yield_from_throw.py",
"basics/try_finally_return2.py",
"basics/gen_yield_from_exc.py",
"basics/async_def.py",
"basics/set_update.py",
"basics/int_big_xor2.py",
"extmod/uctypes_array_assign_native_le_intbig.py",
"basics/gen_yield_from_iter.py",
"basics/try_return.py",
"extmod/binascii_crc32.py",
"basics/tuple_index.py",
"basics/frozenset_binop.py",
"basics/dict2.py",
"basics/dict_pop.py",
"basics/subclass_native_exc_new.py",
"basics/sys_exit.py",
"unicode/unicode_index.py",
"extmod/re_split_notimpl.py",
"extmod/asyncio_cancel_self.py",
"import/import1a.py",
"import/builtin_ext.py",
"basics/try_finally_continue.py",
"basics/int_bytes_optional_args_cp311.py",
"basics/op_precedence.py",
"basics/builtin_help.py",
"micropython/heapalloc_fail_memoryview.py",
"extmod/json_dumps.py",
"basics/op_error_literal.py",
"basics/with_raise.py",
"basics/exception1.py",
"basics/memoryview_slice_assign.py",
"extmod/binascii_hexlify.py",
"extmod/uctypes_le.py",
"extmod/asyncio_gather_finished_early.py",
"basics/0prelim.py",
"basics/builtin_next_arg2.py",
"micropython/viper_ptr32_load.py",
"extmod/asyncio_iterator_event.py",
"basics/fun3.py",
"io/file_long_read2.py",
"basics/string_upperlow.py",
"basics/string_startswith.py",
"basics/list_compare.py",
"basics/gen_yield_from_throw3.py",
"extmod/ssl_sslcontext_verify_mode2.py",
"basics/generator2.py",
"basics/self_type_check.py",
"micropython/meminfo.py",
"io/file_long_read3.py",
"extmod/asyncio_micropython.py",
"basics/assign_expr.py",
"micropython/viper_binop_bitwise_uint.py",
"basics/io_iobase.py",
"micropython/decorator.py",
"basics/string_fstring.py",
"micropython/native_const_intbig.py",
"micropython/viper_ptr16_load.py",
"basics/bytearray_construct.py",
"extmod/asyncio_new_event_loop.py",
"extmod/ssl_basic.py",
"basics/fun_kwonly.py",
"micropython/viper_import.py",
"extmod/json_dumps_ordereddict.py",
"basics/set_unop.py",
"basics/object1.py",
"basics/builtin_map.py",
"basics/io_bytesio_ext.py",
"float/float_struct_e.py",
"basics/fun_annotations.py",
"micropython/heapalloc_fail_bytearray.py",
"basics/builtin_round_int.py",
"basics/slice_attrs.py",
"micropython/import_mpy_native.py",
"basics/bytes_subscr.py",
"unicode/file_invalid.py",
"basics/bytearray_partition.py",
"cmdline/cmd_verbose.py",
"basics/subclass_native2_list.py",
"basics/dict1.py",
"basics/builtin_oct_intbig.py",
"basics/class_super_aslocal.py",
"float/float1.py",
"float/string_format2.py",
"basics/gen_yield_from_stopped.py",
"extmod/asyncio_loop_stop.py",
"basics/gen_yield_from_throw2.py",
"extmod/uctypes_ptr_le.py",
"unicode/unicode_ord.py",
"extmod/json_dump_separators.py",
"basics/bytearray_center.py",
"cmdline/repl_inspect.py",
"extmod/tls_sslcontext_ciphers.py",
"extmod/framebuf_scroll.py",
"extmod/vfs_lfs_mount.py",
"basics/set_specialmeth.py",
"extmod/time_res.py",
"basics/for_range.py",
"basics/string_endswith.py",
"float/complex1_intbig.py",
"basics/set_basic.py",
"extmod/asyncio_set_exception_handler.py",
"basics/builtin_exec.py",
"basics/class_bind_self.py",
"basics/python34.py",
"extmod/re_stack_overflow.py",
"extmod/cryptolib_aes256_cbc.py",
"basics/builtin_oct.py",
"basics/generator_return.py",
"micropython/builtin_execfile.py",
"micropython/viper_ptr8_store_boundary.py",
"basics/bytearray_add.py",
"basics/dict_update.py",
"extmod/btree1.py",
"extmod/vfs_rom.py",
"basics/subclass_native_specmeth.py",
"import/import_pkg4.py",
"micropython/viper_ptr8_store.py",
"basics/list_reverse.py",
"basics/special_comparisons.py",
"extmod/ssl_keycert.py",
"basics/io_stringio_with.py",
"basics/try_finally_break.py",
"stress/gc_trace.py",
"extmod/vfs_posix_ilistdir_filter.py",
"basics/module1.py",
"basics/list_pop.py",
"basics/dict_get.py",
"float/builtin_float_abs.py",
"micropython/const_intbig.py",
"basics/builtin_round_intbig.py",
"stress/recursive_iternext.py",
"basics/with_break.py",
"micropython/const.py",
"import/module_getattr.py",
"micropython/native_while.py",
"basics/fun1.py",
"basics/namedtuple1.py",
"basics/subclass_native2_tuple.py",
"extmod/asyncio_event_fair.py",
"basics/string_rsplit.py",
"basics/fun_defargs2.py",
"basics/array1.py",
"basics/string_find.py",
"ports/unix/ffi_callback.py",
"basics/list1.py",
"extmod/framebuf_subclass.py",
"basics/int_big_or.py",
"basics/list_extend.py",
"basics/list_mult.py",
"extmod/uctypes_byteat.py",
"basics/class_number.py",
"basics/list_slice.py",
"basics/fun_callstar.py",
"micropython/heapalloc_super.py",
"micropython/heapalloc_fail_tuple.py",
"extmod/vfs_lfs_mtime.py",
"basics/bytes_add.py",
"basics/list_insert.py",
"basics/comprehension1.py",
"basics/break.py",
"basics/try1.py",
"micropython/heapalloc_exc_raise.py",
"micropython/viper_binop_comp_imm.py",
"basics/generator_throw.py",
"basics/set_add.py",
"basics/bytes_construct_endian.py",
"micropython/heapalloc_fail_set.py",
"extmod/machine1.py",
"basics/errno1.py",
"extmod/select_poll_eintr.py",
"float/math_fun_intbig.py",
"basics/memoryview_intbig.py",
"basics/int_divmod_intbig.py",
"basics/list_slice_3arg.py",
"basics/try_except_break.py",
"float/cmath_fun.py",
"basics/class_new.py",
"basics/string_format_error.py",
"float/string_format_modulo3.py",
"basics/bytes_gen.py",
"basics/set_remove.py",
"basics/array_add.py",
"basics/builtin_chr.py",
"basics/fun_calldblstar2.py",
"extmod/uctypes_addressof.py",
"basics/getitem.py",
"ports/unix/time_mktime_localtime.py",
"basics/builtin_divmod_intbig.py",
"extmod/re_limit.py",
"extmod/uctypes_sizeof_layout.py",
"basics/builtin_pow3_intbig.py",
"basics/class_use_other.py",
"basics/try_reraise.py",
"basics/gen_yield_from_close.py",
"basics/bytearray1.py",
"extmod/uctypes_sizeof.py",
"extmod/time_mktime.py",
"float/math_fun.py",
"basics/dict_setdefault.py",
"basics/fun_kwonlydef.py",
"basics/floordivide.py",
"basics/class_inplace_op.py",
"basics/class_store.py",
"io/file_long_read.py",
"basics/bytes_strip.py",
"basics/builtin_dir.py",
"basics/class_bases.py",
"extmod/uctypes_print.py",
"basics/ifcond.py",
"extmod/uctypes_bytearray.py",
"basics/string_split.py",
"extmod/asyncio_fair.py",
"micropython/viper_subscr.py",
"extmod/hashlib_sha1.py",
"basics/builtin_override.py",
"stress/recursive_gen.py",
"basics/fun_kwargs.py",
"float/float_parse_doubleprec.py",
"basics/int_constfolding_intbig.py",
"micropython/viper_ptr32_load_boundary.py",
"basics/annotate_var.py",
"basics/fun_varargs.py",
"unicode/unicode_subscr.py",
"misc/rge_sm.py",
"float/math_domain.py",
"extmod/websocket_basic.py",
"micropython/native_try_deep.py",
"basics/scope_implicit.py",
"import/rel_import_inv.py",
"basics/op_error.py",
"basics/builtin_len1.py",
"stress/dict_create.py",
"basics/del_local.py",
"basics/class_emptybases.py",
"micropython/ringio.py",
"basics/generator_args.py",
"basics/subclass_native_cmp.py",
"micropython/viper_binop_divmod.py",
"extmod/re_sub_unmatched.py",
"float/float_format_ftoe.py",
"float/float_divmod_relaxed.py",
"float/float2int_fp30_intbig.py",
"basics/bytes_center.py",
"extmod/asyncio_as_uasyncio.py",
"basics/fun_defargs.py",
"basics/fun_kwvarargs.py",
"micropython/emg_exc.py",
"basics/builtin_hash.py",
"basics/class_descriptor.py",
"micropython/kbd_intr.py",
"basics/bytearray_append.py",
"basics/set_containment.py",
"extmod/asyncio_gather_notimpl.py",
"basics/class_reverse_op.py",
"basics/module2.py",
"micropython/viper_const_intbig.py",
"extmod/asyncio_basic.py",
"micropython/viper_ptr16_store_boundary.py",
"basics/bool1.py",
"float/string_format_modulo2.py",
"basics/try_continue.py",
"float/float_format_ints.py",
"basics/builtin_bin_intbig.py",
"basics/is_isnot_literal.py",
"basics/generator1.py",
"io/file1.py",
"basics/object_new.py",
"basics/class_super.py",
"unicode/unicode_str_modulo.py",
"basics/dict_intern.py",
"basics/bytes_replace.py",
"basics/builtin_issubclass.py",
"basics/syntaxerror_return.py",
"basics/int1.py",
"float/complex_dunder.py",
"basics/del_deref.py",
"extmod/uctypes_array_assign_le.py",
"micropython/heapalloc_fail_list.py",
"micropython/extreme_exc.py",
"basics/op_error_intbig.py",
"import/module_dict.py",
"float/builtin_float_pow.py",
"basics/struct_endian.py",
"extmod/vfs_fat_more.py",
"basics/frozenset_difference.py",
"basics/try_finally1.py",
"micropython/native_for.py",
"basics/bytes_construct_bytearray.py",
"basics/while_nest_exc.py",
"basics/deque2.py",
"basics/builtin_locals.py",
"basics/gen_yield_from_ducktype.py",
"basics/bytes_add_array.py",
"basics/for1.py",
"basics/string_format2.py",
"basics/fun2.py",
"basics/int_big_and.py",
"basics/class_ordereddict.py",
"basics/array_construct_endian.py",
"extmod/vfs_lfs_superblock.py",
"basics/builtin_eval_error.py",
"basics/int_bytes_int64.py",
"float/inf_nan_arith.py",
"micropython/native_fun_attrs.py",
"basics/for3.py",
"extmod/vfs_fat_fileio1.py",
"basics/fun_error.py",
"basics/bytes_compare3.py",
"extmod/re_split.py",
"basics/array_construct.py",
"float/float2int_doubleprec_intbig.py",
"basics/dict_from_iter.py",
"float/int_divzero.py",
"micropython/viper_args.py",
"import/import_override.py",
"basics/assign1.py",
"basics/struct2.py",
"unicode/unicode.py",
"extmod/asyncio_cancel_task.py",
"basics/builtin_filter.py",
"basics/list_slice_assign.py",
"basics/builtin_reversed.py",
"basics/dict_popitem.py",
"extmod/uctypes_native_le.py",
"basics/bytes_large.py",
"basics/class_notimpl.py",
"extmod/ssl_sslcontext.py",
"basics/subclass_native_iter.py",
"extmod/asyncio_basic2.py",
"micropython/const2.py",
"basics/builtin_eval_buffer.py",
"basics/class_delattr_setattr.py",
"misc/non_compliant.py",
"float/float_dunder.py",
"extmod/asyncio_heaplock.py",
"micropython/viper_ptr16_load_boundary.py",
"basics/try2.py",
"basics/lambda1.py",
"io/file_with.py",
"micropython/viper_misc2.py",
"float/builtin_float_hash.py",
"basics/bytes_format_modulo.py",
"float/float_format_ints_power10.py",
"basics/closure1.py",
"basics/try_finally2.py",
"import/import_pkg2.py",
"basics/module_dict.py",
"basics/class_call.py",
"basics/with1.py",
"micropython/stack_use.py",
"extmod/cryptolib_aes128_cbc.py",
"basics/async_await.py",
"basics/class_misc.py",
"basics/unboundlocal.py",
"basics/async_with_return.py",
"basics/for_else.py",
"extmod/asyncio_await_return.py",
"float/true_value.py",
"basics/gen_yield_from_throw_multi_arg.py",
"extmod/btree_gc.py",
"basics/int_big_and3.py",
"io/file_iter.py",
"basics/list_copy.py",
"basics/int_intbig.py",
"float/math_dunder.py",
"basics/bytearray_add_self.py",
"extmod/btree_closed.py",
"basics/generator_closure.py",
"basics/builtin_id.py",
"basics/builtin_super.py",
"extmod/binascii_a2b_base64.py",
"import/import3a.py",
"micropython/heapalloc_inst_call.py",
"cmdline/repl_basic.py",
"micropython/viper_globals.py",
"basics/closure2.py",
"micropython/viper_ptr8_load_boundary.py",
"import/import_pkg3.py",
"extmod/vfs_posix_enoent.py",
"extmod/asyncio_lock.py",
"micropython/viper_binop_multi_comp.py",
"extmod/asyncio_get_event_loop.py",
"cmdline/repl_cont.py",
"basics/async_await2.py",
"import/import1b.py",
"basics/ifexpr.py",
"basics/subclass_native3.py",
"basics/class_staticclassmethod.py",
"basics/set_copy.py",
"extmod/vfs_lfs_file.py",
"basics/iter1.py",
"extmod/uctypes_sizeof_float.py",
"extmod/framebuf8.py",
"micropython/viper_types.py",
"extmod/ssl_sslcontext_verify_mode.py",
"basics/bytes_escape_unicode.py",
"extmod/vfs_fat_oldproto.py",
"float/bytes_construct_endian.py",
"basics/string_replace.py",
"micropython/viper_cond.py",
"basics/io_stringio1.py",
"basics/builtin_sorted.py",
"basics/builtin_allany.py",
"basics/sys_path.py",
"basics/bytes_partition.py",
"basics/builtin_pow.py",
"basics/builtin_abs.py",
"basics/for2.py",
"float/float_struct.py",
"basics/int_big_div.py",
"basics/dict_iterator.py",
"basics/iter0.py",
"basics/class_contains.py",
"basics/string_escape.py",
"basics/andor.py",
"import/import_broken.py",
"basics/iter2.py",
"float/cmath_fun_special.py",
"basics/builtin_hasattr.py",
"micropython/native_try.py",
"basics/unary_op.py",
"basics/int_big_error.py",
"basics/builtin_sum.py",
"basics/memoryerror.py",
"extmod/cryptolib_aes128_ecb_inpl.py",
"basics/string_format_modulo_int.py",
"basics/special_methods_intbig.py",
"basics/slice_indices.py",
"cmdline/repl_autocomplete.py",
"micropython/const_alltypes.py",
"float/list_index.py",
"basics/bytearray_construct_array.py",
"basics/string_join.py",
"float/bytearray_construct_endian.py",
"micropython/viper_ptr32_store_boundary.py",
"basics/builtin_hash_gen.py",
"float/float_format.py",
"basics/list_sum.py",
"float/string_format_fp30.py",
"basics/del_name.py",
"float/int_big_float.py",
"basics/generator_pep479.py",
"basics/dict_fixed.py",
"extmod/re_namedclass.py",
"io/file_seek.py",
"basics/class_store_class.py",
"extmod/framebuf_blit.py",
"micropython/viper_binop_comp_uint.py",
"basics/lexer.py",
"basics/class_str.py",
"basics/builtin_divmod.py",
"basics/bytes_compare_array.py",
"basics/generator_send.py",
"basics/builtin_print.py",
"basics/subclass_classmethod.py",
"micropython/viper_misc_intbig.py",
"basics/memoryview_gc.py",
"basics/types1.py",
"float/string_format.py",
"ports/unix/ffi_float.py",
"basics/gc1.py",
"basics/generator_throw_nested.py",
"basics/class2.py",
"basics/gen_yield_from_throw_repeat.py",
"basics/int_bytes.py",
"basics/object_dict.py",
"misc/features.py",
"basics/builtin_callable.py",
"basics/string_format.py",
"cmdline/repl_words_move.py",
"basics/builtin_bin.py",
"basics/exceptpoly.py",
"stress/bytecode_limit.py",
"basics/bytes_compare2.py",
"extmod/machine_pinbase.py",
"basics/types2.py",
"float/string_format_modulo2_intbig.py",
"micropython/viper_error.py",
"micropython/native_closure.py",
"extmod/tls_dtls.py",
"basics/fun_callstardblstar.py",
"basics/try_finally_return3.py",
"stress/dict_create_max.py",
"basics/string_rpartition.py",
"basics/string_splitlines.py",
"extmod/json_dump_iobase.py",
"basics/frozenset_copy.py",
"basics/dict_views.py",
"basics/async_for.py",
"basics/attrtuple1.py",
"io/file_readline.py",
"basics/bytes_compare_bytearray.py",
"basics/dict_fromkeys2.py",
"extmod/binascii_unhexlify.py",
"basics/list_count.py",
"basics/subclass_native_init.py",
"basics/try_else_finally.py",
"extmod/ssl_cadata.py",
"extmod/binascii_b2a_base64.py",
"basics/bytes.py",
"basics/set_discard.py",
"basics/syntaxerror.py",
"basics/builtin_abs_intbig.py",
"basics/set_pop.py",
"micropython/heapalloc_slice.py",
"misc/print_exception.py",
"basics/deque_slice.py",
"import/gen_context2.py",
"import/import_pkg1.py",
"micropython/viper_ptr16_store.py",
"basics/int_constfolding.py",
"basics/bytes_add_bytearray.py",
"micropython/viper_ptr8_load.py",
"basics/tuple1.py",
"basics/bytearray_intbig.py",
"basics/list_slice_assign_grow.py",
"micropython/heapalloc_bytesio.py",
"basics/gen_yield_from.py",
"basics/builtin_property_inherit.py",
"extmod/random_extra_float.py",
"basics/list_index.py",
"extmod/json_loads.py",
"basics/struct_micropython.py",
"basics/for_return.py",
"extmod/vfs_lfs_corrupt.py",
"extmod/asyncio_wait_task.py",
"import/import_pkg7.py",
"unicode/unicode_pos.py",
"extmod/json_loads_float.py",
"basics/fun_calldblstar4.py",
"extmod/uctypes_sizeof_od.py",
"basics/builtin_range_attrs.py",
"basics/builtin_slice.py",
"basics/set_iter_of_iter.py",
"basics/while_cond.py",
"extmod/uctypes_32bit_intbig.py",
"basics/closure_defargs.py",
"extmod/asyncio_event.py",
"basics/async_with2.py",
"basics/int_big_pow.py",
"basics/string_rfind.py",
"basics/set_binop.py",
"basics/unpack1.py",
"extmod/deflate_decompress.py",
"extmod/vfs_blockdev.py",
"basics/op_error_bytearray.py",
"basics/equal.py",
"micropython/heapalloc_traceback.py",
"basics/generator_exc.py",
"basics/int_big_xor.py",
"basics/try_finally_break2.py",
"extmod/json_dump.py",
"basics/bytes_compare.py",
"extmod/vfs_blockdev_invalid.py",
"cmdline/repl_autoindent.py",
"basics/class_super_object.py",
"basics/builtin_hex.py",
"float/math_constants_extra.py",
"basics/try4.py",
"extmod/vfs_lfs_ilistdir_del.py",
"extmod/select_poll_basic.py",
"extmod/vfs_fat_mtime.py",
"basics/builtin_setattr.py",
"basics/scope.py",
"extmod/cryptolib_aes128_ecb_into.py",
"basics/generator_throw_repeat.py",
"basics/bytes_mult.py",
"micropython/heapalloc_exc_compressed.py",
"basics/try_finally_return4.py",
"basics/builtin_exec_buffer.py",
"basics/fun_error2.py",
"extmod/vfs_posix_paths.py",
"extmod/uctypes_ptr_native_le.py",
"import/import_circular.py",
"float/math_factorial_intbig.py",
"io/file_readinto.py",
"basics/subclass_native1.py",
"basics/sys_stdio.py",
"basics/class1.py",
"basics/subclass_native_buffer.py",
"micropython/opt_level_lineno.py",
"extmod/platform_basic.py",
"basics/try_reraise2.py",
"io/open_plus.py",
"basics/int2.py",
"cmdline/cmd_showbc_opt.py",
"basics/deque1.py",
"stress/recursion.py",
"basics/async_with_break.py",
"basics/set_symmetric_difference.py",
"float/float_compare.py",
"basics/op_error_memoryview.py",
"import/import_star_error.py",
"extmod/asyncio_lock_cancel.py",
"basics/builtin_property.py",
"extmod/machine_signal.py",
"basics/set_intersection.py",
"extmod/framebuf4.py",
"basics/tuple_mult.py",
"extmod/socket_tcp_basic.py",
"micropython/heapalloc.py",
"basics/set_comprehension.py",
"basics/generator_pend_throw.py",
"basics/getattr.py",
"basics/string_slice.py",
"basics/string_center.py",
"extmod/framebuf2.py",
"basics/builtin_type.py",
"float/string_format_modulo.py",
"basics/string_format_modulo.py",
"basics/boundmeth1.py",
"basics/bytes_count.py",
"basics/parser.py",
"basics/builtin_pow3.py",
"basics/try_finally_loops.py",
"ports/unix/ffi_float2.py",
"basics/dict_construct.py",
"float/math_fun_bool.py",
"ports/unix/mod_os.py",
"extmod/asyncio_wait_for_linked_task.py",
"extmod/cryptolib_aes128_ecb_enc.py",
"basics/generator_throw_multi_arg.py",
"micropython/opt_level.py",
"basics/fun_str.py",
"extmod/asyncio_cancel_fair2.py",
"basics/fun_calldblstar3.py",
"basics/int_big_zeroone.py",
"basics/class_item.py",
"micropython/memstats.py",
"extmod/heapq1.py",
"extmod/asyncio_gather.py",
"basics/int_big_cmp.py",
"basics/fun_largestate.py",
"stress/fun_call_limit.py",
"basics/builtin_compile.py",
"basics/builtin_range.py",
"extmod/uctypes_error.py",
"float/complex1.py",
"basics/set_difference.py",
"extmod/framebuf_polygon.py",
"extmod/vfs_fat_ramdisklarge.py",
"extmod/machine_pulse.py",
"basics/exception_chain.py",
"basics/class_inherit1.py",
"basics/bytes_find.py",
"extmod/asyncio_threadsafeflag.py",
"float/math_fun_special.py",
"extmod/random_basic.py",
"basics/subclass_native_str.py",
"extmod/uctypes_native_float.py",
"extmod/asyncio_current_task.py",
"basics/string_partition.py",
"basics/del_subscr.py",
"extmod/uctypes_le_float.py",
"basics/subclass_native5.py",
"float/builtin_float_round.py",
"unicode/file1.py",
"basics/ordereddict1.py",
"basics/string_mult.py",
"basics/logic_constfolding.py",
"import/import2a.py",
"basics/tuple_count.py",
"float/float2int_intbig.py",
"unicode/unicode_slice.py",
"micropython/viper_misc.py",
"extmod/ticks_add.py",
"basics/array_intbig.py",
"basics/deque_micropython.py",
"extmod/ssl_keycert_pkcs8.py",
"float/complex_special_methods.py",
"float/math_domain_python311.py",
"unicode/unicode_chr.py",
"basics/iter_of_iter.py",
"micropython/viper_const.py",
"micropython/schedule.py",
"micropython/viper_binop_comp.py",
"basics/dict_del.py",
"basics/slice_intbig.py",
"basics/sys1.py",
"basics/with_continue.py",
"extmod/asyncio_cancel_fair.py",
"basics/memoryview2.py",
"basics/bytes_construct.py",
"float/math_isclose.py",
"basics/string_istest.py",
"basics/int_bytes_intbig.py",
"import/import_long_dyn.py",
"io/argv.py",
"basics/async_for2.py",
"basics/frozenset1.py",
"stress/recursive_data.py",
"basics/class_inherit_mul.py",
"cmdline/repl_autocomplete_underscore.py",
"basics/tuple_compare.py",
"unicode/unicode_iter.py",
"extmod/time_ms_us.py",
"cmdline/cmd_showbc_const.py",
"basics/string_fstring_debug.py",
"basics/slice_op.py",
"basics/try_finally_return5.py",
"basics/io_bytesio_cow.py",
"basics/except_match_tuple.py",
"basics/bytes_split.py",
"extmod/framebuf_bounds.py",
"basics/frozenset_set.py",
"basics/containment.py",
"basics/class_super_multinherit.py",
"micropython/viper_storeattr.py",
"extmod/json_dumps_separators.py",
"basics/set_isfooset.py",
"basics/class_getattr.py",
"extmod/vfs_fat_ramdisk.py",
"basics/sys_stdio_buffer.py",
"extmod/uctypes_array_assign_native_le.py",
"basics/try_error.py",
"misc/sys_exc_info.py",
"basics/builtin_enumerate.py",
"extmod/tls_threads.py",
"float/python36.py",
"basics/string_repr.py",
"extmod/re_group.py",
"basics/equal_class.py",
"float/builtin_float_round_intbig.py",
"basics/memoryview_slice_size.py",
"float/types.py",
"float/float_parse.py",
"micropython/viper_misc3.py",
"extmod/vfs_lfs_error.py",
"basics/int_divzero.py",
"basics/list_remove.py",
"basics/with_return.py",
"basics/array_micropython.py",
"basics/set_isdisjoint.py",
"stress/list_sort.py",
"basics/dict_copy.py",
"micropython/viper_try.py",
"unicode/unicode_str_format.py",
"basics/bytearray_byte_operations.py",
"basics/builtin_zip.py",
"basics/dict_union.py",
"basics/bytes_add_endian.py",
"extmod/framebuf_ellipse.py",
"basics/async_syntaxerror.py",
"basics/string_rindex.py",
"basics/scope_class.py",
"basics/builtin_round.py",
"basics/builtin_minmax.py",
"basics/int_big_or3.py",
"extmod/uctypes_array_load_store.py",
"basics/int_big_and2.py",
"extmod/asyncio_wait_for.py",
"extmod/asyncio_cancel_wait_on_finished.py",
"basics/struct1_intbig.py",
"basics/builtin_getattr.py",
"basics/assign_expr_syntaxerror.py",
"basics/string_index.py",
"io/file_readinto_len.py",
"float/math_fun_int.py",
"basics/true_value.py",
"extmod/tls_sslcontext_micropython.py",
"micropython/native_misc.py",
"float/float_format_ints_doubleprec.py",
"basics/class_super_closure.py",
"cmdline/repl_micropyinspect.py",
"basics/assign_expr_scope.py",
"extmod/framebuf1.py",
"basics/bytes_construct_array.py",
"basics/builtin_delattr.py",
"unicode/file2.py",
"stress/qstr_limit.py",
"micropython/native_gen.py",
"basics/python36.py",
"basics/is_isnot.py"
] |
starryzhang/sweb.eval.x86_64.micropython_1776_micropython-17683
|
|
timescale/timescaledb
|
8559
|
timescale__timescaledb-8559
|
[
"6902"
] |
bbbebbfa978465bfdf6d0bac418ee4d269ad4da8
|
diff --git a/.unreleased/pr_8559 b/.unreleased/pr_8559
new file mode 100644
index 00000000000..79123483309
--- /dev/null
+++ b/.unreleased/pr_8559
@@ -0,0 +1,2 @@
+Fixes: #8559 Fix `timestamp out of range` using `end_offset=NULL` on CAgg refresh policy
+Thanks: @nofalx for reporting the error when using `end_offset=NULL` on CAgg refresh policy
diff --git a/tsl/src/bgw_policy/continuous_aggregate_api.c b/tsl/src/bgw_policy/continuous_aggregate_api.c
index 2f91514910f..3181c872694 100644
--- a/tsl/src/bgw_policy/continuous_aggregate_api.c
+++ b/tsl/src/bgw_policy/continuous_aggregate_api.c
@@ -137,7 +137,7 @@ policy_refresh_cagg_get_refresh_end(const Dimension *dim, const Jsonb *config, b
int64 res = get_time_from_config(dim, config, POL_REFRESH_CONF_KEY_END_OFFSET, end_isnull);
if (*end_isnull)
- return ts_time_get_end_or_max(ts_dimension_get_partition_type(dim));
+ return ts_time_get_noend_or_max(ts_dimension_get_partition_type(dim));
return res;
}
|
diff --git a/tsl/test/expected/cagg_policy.out b/tsl/test/expected/cagg_policy.out
index aaa6d5b8621..d85f7f4a24c 100644
--- a/tsl/test/expected/cagg_policy.out
+++ b/tsl/test/expected/cagg_policy.out
@@ -1400,3 +1400,72 @@ SELECT timescaledb_experimental.add_policies('cagg');
f
(1 row)
+-- Issue #6902
+-- Fix timestamp out of range in a refresh policy when setting `end_offset=>NULL`
+-- for a CAgg with variable sized bucket (i.e: using `time_bucket` with timezone)
+CREATE TABLE issue_6902 (
+ ts TIMESTAMPTZ NOT NULL,
+ temperature NUMERIC
+) WITH (
+ timescaledb.hypertable,
+ timescaledb.partition_column='ts',
+ timescaledb.chunk_interval='1 day',
+ timescaledb.compress='off'
+);
+INSERT INTO issue_6902
+SELECT t, 1 FROM generate_series(now() - interval '3 hours', now(), interval '1 minute') AS t;
+CREATE MATERIALIZED VIEW issue_6902_by_hour
+WITH (timescaledb.continuous) AS
+SELECT
+ time_bucket(INTERVAL '1 hour', ts, 'America/Sao_Paulo') AS bucket, -- using timezone
+ MAX(temperature),
+ MIN(temperature),
+ COUNT(*)
+FROM issue_6902
+GROUP BY 1
+WITH NO DATA;
+SELECT add_continuous_aggregate_policy (
+ 'issue_6902_by_hour',
+ start_offset => INTERVAL '3 hours',
+ end_offset => NULL,
+ schedule_interval => INTERVAL '12 hour',
+ initial_start => now() + INTERVAL '12 hour'
+) AS job_id \gset
+-- 181 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
+ count
+-------
+ 181
+(1 row)
+
+-- run again without any change, remain 181 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
+ count
+-------
+ 181
+(1 row)
+
+-- change existing data
+UPDATE issue_6902
+SET temperature = temperature + 1;
+-- run again without any change, remain 181 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
+ count
+-------
+ 181
+(1 row)
+
+-- insert more data
+INSERT INTO issue_6902
+SELECT t, 1 FROM generate_series(now() - interval '3 hours', now(), interval '1 minute') AS t;
+-- run again without and should have 362 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
+ count
+-------
+ 362
+(1 row)
+
diff --git a/tsl/test/sql/cagg_policy.sql b/tsl/test/sql/cagg_policy.sql
index a13f9251276..34a23813e34 100644
--- a/tsl/test/sql/cagg_policy.sql
+++ b/tsl/test/sql/cagg_policy.sql
@@ -676,3 +676,62 @@ AS SELECT time_bucket(1, a), sum(b)
FROM t GROUP BY time_bucket(1, a);
SELECT timescaledb_experimental.add_policies('cagg');
+
+-- Issue #6902
+-- Fix timestamp out of range in a refresh policy when setting `end_offset=>NULL`
+-- for a CAgg with variable sized bucket (i.e: using `time_bucket` with timezone)
+CREATE TABLE issue_6902 (
+ ts TIMESTAMPTZ NOT NULL,
+ temperature NUMERIC
+) WITH (
+ timescaledb.hypertable,
+ timescaledb.partition_column='ts',
+ timescaledb.chunk_interval='1 day',
+ timescaledb.compress='off'
+);
+
+INSERT INTO issue_6902
+SELECT t, 1 FROM generate_series(now() - interval '3 hours', now(), interval '1 minute') AS t;
+
+CREATE MATERIALIZED VIEW issue_6902_by_hour
+WITH (timescaledb.continuous) AS
+SELECT
+ time_bucket(INTERVAL '1 hour', ts, 'America/Sao_Paulo') AS bucket, -- using timezone
+ MAX(temperature),
+ MIN(temperature),
+ COUNT(*)
+FROM issue_6902
+GROUP BY 1
+WITH NO DATA;
+
+SELECT add_continuous_aggregate_policy (
+ 'issue_6902_by_hour',
+ start_offset => INTERVAL '3 hours',
+ end_offset => NULL,
+ schedule_interval => INTERVAL '12 hour',
+ initial_start => now() + INTERVAL '12 hour'
+) AS job_id \gset
+
+-- 181 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
+
+-- run again without any change, remain 181 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
+
+-- change existing data
+UPDATE issue_6902
+SET temperature = temperature + 1;
+
+-- run again without any change, remain 181 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
+
+-- insert more data
+INSERT INTO issue_6902
+SELECT t, 1 FROM generate_series(now() - interval '3 hours', now(), interval '1 minute') AS t;
+
+-- run again without and should have 362 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
diff --git a/tsl/test/sql/cagg_policy.sql.orig b/tsl/test/sql/cagg_policy.sql.orig
new file mode 100644
index 00000000000..020f84dc11b
--- /dev/null
+++ b/tsl/test/sql/cagg_policy.sql.orig
@@ -0,0 +1,740 @@
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+
+-- test add and remove refresh policy apis
+
+SET ROLE :ROLE_DEFAULT_PERM_USER;
+
+--TEST1 ---
+--basic test with count
+CREATE TABLE int_tab (a integer, b integer, c integer);
+SELECT table_name FROM create_hypertable('int_tab', 'a', chunk_time_interval=> 10);
+
+INSERT INTO int_tab VALUES( 3 , 16 , 20);
+INSERT INTO int_tab VALUES( 1 , 10 , 20);
+INSERT INTO int_tab VALUES( 1 , 11 , 20);
+INSERT INTO int_tab VALUES( 1 , 12 , 20);
+INSERT INTO int_tab VALUES( 1 , 13 , 20);
+INSERT INTO int_tab VALUES( 1 , 14 , 20);
+INSERT INTO int_tab VALUES( 2 , 14 , 20);
+INSERT INTO int_tab VALUES( 2 , 15 , 20);
+INSERT INTO int_tab VALUES( 2 , 16 , 20);
+
+CREATE OR REPLACE FUNCTION integer_now_int_tab() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0) FROM int_tab $$;
+SELECT set_integer_now_func('int_tab', 'integer_now_int_tab');
+
+CREATE MATERIALIZED VIEW mat_m1( a, countb )
+WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+as
+SELECT a, count(b)
+FROM int_tab
+GROUP BY time_bucket(1, a), a WITH NO DATA;
+
+\c :TEST_DBNAME :ROLE_SUPERUSER
+
+SET timezone TO PST8PDT;
+
+DELETE FROM _timescaledb_config.bgw_job WHERE TRUE;
+
+SET ROLE :ROLE_DEFAULT_PERM_USER;
+SELECT count(*) FROM _timescaledb_config.bgw_job;
+
+\set ON_ERROR_STOP 0
+\set VERBOSITY default
+
+-- Test 1 step policy for integer type buckets
+ALTER materialized view mat_m1 set (timescaledb.compress = true);
+-- No policy is added if one errors out
+SELECT timescaledb_experimental.add_policies('mat_m1', refresh_start_offset => 1, refresh_end_offset => 10, compress_after => 11, drop_after => 20);
+SELECT timescaledb_experimental.show_policies('mat_m1');
+
+-- All policies are added in one step
+SELECT timescaledb_experimental.add_policies('mat_m1', refresh_start_offset => 10, refresh_end_offset => 1, compress_after => 11, drop_after => 20);
+SELECT timescaledb_experimental.show_policies('mat_m1');
+
+--Test coverage: new view for policies on CAggs
+SELECT * FROM timescaledb_experimental.policies ORDER BY relation_name, proc_name;
+
+--Test coverage: new view for policies only shows the policies for CAggs
+SELECT add_retention_policy('int_tab', 20);
+SELECT * FROM timescaledb_experimental.policies ORDER BY relation_name, proc_name;
+SELECT remove_retention_policy('int_tab');
+
+-- Test for duplicated policies (issue #5492)
+CREATE MATERIALIZED VIEW mat_m2( a, sumb )
+WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+as
+SELECT a, sum(b)
+FROM int_tab
+GROUP BY time_bucket(1, a), a WITH NO DATA;
+
+-- add refresh policy
+SELECT timescaledb_experimental.add_policies('mat_m2', refresh_start_offset => 10, refresh_end_offset => 1);
+SELECT timescaledb_experimental.show_policies('mat_m2');
+-- check for only one refresh policy for each cagg
+SELECT * FROM timescaledb_experimental.policies WHERE proc_name ~ 'refresh' ORDER BY relation_name, proc_name;
+
+SELECT timescaledb_experimental.remove_all_policies('mat_m2');
+DROP MATERIALIZED VIEW mat_m2;
+
+-- Alter policies
+SELECT timescaledb_experimental.alter_policies('mat_m1', refresh_start_offset => 11, compress_after=>13, drop_after => 25);
+SELECT timescaledb_experimental.show_policies('mat_m1');
+
+-- Remove one or more policy
+SELECT timescaledb_experimental.remove_policies('mat_m1', false, 'policy_refresh_continuous_aggregate', 'policy_compression');
+SELECT timescaledb_experimental.show_policies('mat_m1');
+
+-- Add one policy
+SELECT timescaledb_experimental.add_policies('mat_m1', refresh_start_offset => 10, refresh_end_offset => 1);
+SELECT timescaledb_experimental.show_policies('mat_m1');
+
+-- Remove all policies
+SELECT timescaledb_experimental.remove_policies('mat_m1', false, 'policy_refresh_continuous_aggregate', 'policy_retention');
+SELECT timescaledb_experimental.show_policies('mat_m1');
+
+--Cross policy checks
+--refresh and compression policy overlap
+SELECT timescaledb_experimental.add_policies('mat_m1', refresh_start_offset => 12, refresh_end_offset => 1, compress_after=>11);
+
+--refresh and retention policy overlap
+SELECT timescaledb_experimental.add_policies('mat_m1', refresh_start_offset => 12, refresh_end_offset => 1, drop_after=>11);
+
+--compression and retention policy overlap
+SELECT timescaledb_experimental.add_policies('mat_m1', compress_after => 10, drop_after => 10);
+
+-- Alter non existent policies
+SELECT timescaledb_experimental.alter_policies('mat_m1', refresh_start_offset => 12, compress_after=>11, drop_after => 15);
+
+ALTER materialized view mat_m1 set (timescaledb.compress = false);
+
+SELECT add_continuous_aggregate_policy('int_tab', '1 day'::interval, 10 , '1 h'::interval);
+SELECT add_continuous_aggregate_policy('mat_m1', '1 day'::interval, 10 , '1 h'::interval);
+SELECT add_continuous_aggregate_policy('mat_m1', '1 day'::interval, 10 );
+SELECT add_continuous_aggregate_policy('mat_m1', 10, '1 day'::interval, '1 h'::interval);
+--start_interval < end_interval
+SELECT add_continuous_aggregate_policy('mat_m1', 5, 10, '1h'::interval);
+--refresh window less than two buckets
+SELECT add_continuous_aggregate_policy('mat_m1', 11, 10, '1h'::interval);
+SELECT add_continuous_aggregate_policy('mat_m1', 20, 10, '1h'::interval) as job_id \gset
+
+--adding again should warn/error
+SELECT add_continuous_aggregate_policy('mat_m1', 20, 10, '1h'::interval, if_not_exists=>false);
+SELECT add_continuous_aggregate_policy('mat_m1', 20, 15, '1h'::interval, if_not_exists=>true);
+SELECT add_continuous_aggregate_policy('mat_m1', 20, 10, '1h'::interval, if_not_exists=>true);
+
+-- modify config and try to add, should error out
+SELECT config FROM _timescaledb_config.bgw_job where id = :job_id;
+SELECT hypertable_id as mat_id FROM _timescaledb_config.bgw_job where id = :job_id \gset
+\set VERBOSITY terse
+\set ON_ERROR_STOP 1
+
+\c :TEST_DBNAME :ROLE_SUPERUSER
+
+SET timezone TO PST8PDT;
+
+UPDATE _timescaledb_config.bgw_job
+SET config = jsonb_build_object('mat_hypertable_id', :mat_id)
+WHERE id = :job_id;
+SET ROLE :ROLE_DEFAULT_PERM_USER;
+SELECT config FROM _timescaledb_config.bgw_job where id = :job_id;
+
+\set ON_ERROR_STOP 0
+\set VERBOSITY default
+SELECT add_continuous_aggregate_policy('mat_m1', 20, 10, '1h'::interval, if_not_exists=>true);
+
+SELECT remove_continuous_aggregate_policy('int_tab');
+SELECT remove_continuous_aggregate_policy('mat_m1');
+-- add with NULL offset, readd with NULL offset
+SELECT add_continuous_aggregate_policy('mat_m1', 20, NULL, '1h'::interval, if_not_exists=>true);
+SELECT add_continuous_aggregate_policy('mat_m1', 20, NULL, '1h'::interval, if_not_exists=>true); -- same param values, so we get a NOTICE
+SELECT add_continuous_aggregate_policy('mat_m1', NULL, NULL, '1h'::interval, if_not_exists=>true); -- different values, so we get a WARNING
+SELECT remove_continuous_aggregate_policy('mat_m1');
+SELECT add_continuous_aggregate_policy('mat_m1', NULL, 20, '1h'::interval, if_not_exists=>true);
+SELECT add_continuous_aggregate_policy('mat_m1', NULL, 20, '1h'::interval, if_not_exists=>true);
+SELECT add_continuous_aggregate_policy('mat_m1', NULL, NULL, '1h'::interval, if_not_exists=>true);
+SELECT remove_continuous_aggregate_policy('mat_m1');
+
+--this one will fail
+SELECT remove_continuous_aggregate_policy('mat_m1');
+SELECT remove_continuous_aggregate_policy('mat_m1', if_not_exists=>true);
+
+--now try to add a policy as a different user than the one that created the cagg
+--should fail
+SET ROLE :ROLE_DEFAULT_PERM_USER_2;
+SELECT add_continuous_aggregate_policy('mat_m1', 20, 10, '1h'::interval) as job_id ;
+\set VERBOSITY terse
+\set ON_ERROR_STOP 1
+
+SET ROLE :ROLE_DEFAULT_PERM_USER;
+DROP MATERIALIZED VIEW mat_m1;
+
+--- code coverage tests : add policy for timestamp and date based table ---
+CREATE TABLE continuous_agg_max_mat_date(time DATE);
+SELECT create_hypertable('continuous_agg_max_mat_date', 'time');
+CREATE MATERIALIZED VIEW max_mat_view_date
+ WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+ AS SELECT time_bucket('7 days', time)
+ FROM continuous_agg_max_mat_date
+ GROUP BY 1 WITH NO DATA;
+
+\set ON_ERROR_STOP 0
+\set VERBOSITY default
+
+-- Test 1 step policy for timestamp type buckets
+ALTER materialized view max_mat_view_date set (timescaledb.compress = true);
+-- Only works for cagg
+SELECT timescaledb_experimental.add_policies('continuous_agg_max_mat_date', refresh_start_offset => '1 day'::interval, refresh_end_offset => '2 day'::interval, compress_after => '20 days'::interval, drop_after => '25 days'::interval);
+SELECT timescaledb_experimental.show_policies('continuous_agg_max_mat_date');
+SELECT timescaledb_experimental.alter_policies('continuous_agg_max_mat_date', compress_after=>'16 days'::interval);
+SELECT timescaledb_experimental.remove_policies('continuous_agg_max_mat_date', false, 'policy_refresh_continuous_aggregate');
+
+-- No policy is added if one errors out
+SELECT timescaledb_experimental.add_policies('max_mat_view_date', refresh_start_offset => '1 day'::interval, refresh_end_offset => '2 day'::interval, compress_after => '20 days'::interval, drop_after => '25 days'::interval);
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+
+-- Create open ended refresh_policy
+SELECT timescaledb_experimental.add_policies('max_mat_view_date', refresh_end_offset => '2 day'::interval);
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+SELECT timescaledb_experimental.remove_policies('max_mat_view_date', false, 'policy_refresh_continuous_aggregate');
+
+SELECT timescaledb_experimental.add_policies('max_mat_view_date', refresh_end_offset => '2 day'::interval, refresh_start_offset=>'-infinity');
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+SELECT timescaledb_experimental.remove_policies('max_mat_view_date', false, 'policy_refresh_continuous_aggregate');
+
+SELECT timescaledb_experimental.add_policies('max_mat_view_date', refresh_start_offset => '2 day'::interval);
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+SELECT timescaledb_experimental.remove_policies('max_mat_view_date', false, 'policy_refresh_continuous_aggregate');
+
+SELECT timescaledb_experimental.add_policies('max_mat_view_date', refresh_start_offset => '2 day'::interval, refresh_end_offset=>'infinity');
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+SELECT timescaledb_experimental.remove_policies('max_mat_view_date', false, 'policy_refresh_continuous_aggregate');
+
+-- Open ended at both sides, for code coverage
+SELECT timescaledb_experimental.add_policies('max_mat_view_date', refresh_end_offset => 'infinity', refresh_start_offset => '-infinity');
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+SELECT timescaledb_experimental.remove_policies('max_mat_view_date', false, 'policy_refresh_continuous_aggregate');
+
+-- All policies are added in one step
+SELECT timescaledb_experimental.add_policies('max_mat_view_date', refresh_start_offset => '15 days'::interval, refresh_end_offset => '1 day'::interval, compress_after => '20 days'::interval, drop_after => '25 days'::interval);
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+
+-- Alter policies
+SELECT timescaledb_experimental.alter_policies('max_mat_view_date', refresh_start_offset => '16 days'::interval, compress_after=>'26 days'::interval, drop_after => '40 days'::interval);
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+
+--Alter refresh_policy to make it open ended
+SELECT timescaledb_experimental.remove_policies('max_mat_view_date', false, 'policy_retention', 'policy_compression');
+SELECT timescaledb_experimental.alter_policies('max_mat_view_date', refresh_start_offset =>'-infinity');
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+
+SELECT timescaledb_experimental.alter_policies('max_mat_view_date', refresh_end_offset =>'infinity', refresh_start_offset =>'5 days'::interval);
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+
+--Cross policy checks
+-- Refresh and compression policies overlap
+SELECT timescaledb_experimental.add_policies('max_mat_view_date', compress_after => '20 days'::interval, drop_after => '25 days'::interval);
+SELECT timescaledb_experimental.alter_policies('max_mat_view_date', compress_after=> '4 days'::interval);
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+
+-- Refresh and retention policies overlap
+SELECT timescaledb_experimental.alter_policies('max_mat_view_date', refresh_start_offset =>'5 days'::interval, drop_after=> '4 days'::interval);
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+
+--Do not allow refreshed data to be deleted
+SELECT add_retention_policy('continuous_agg_max_mat_date', '25 days'::interval);
+SELECT timescaledb_experimental.alter_policies('max_mat_view_date', refresh_start_offset =>'25 days'::interval);
+SELECT remove_retention_policy('continuous_agg_max_mat_date');
+
+-- Remove one or more policies
+-- Code coverage: no policy names provided
+SELECT timescaledb_experimental.remove_policies('max_mat_view_date', false);
+
+-- Code coverage: incorrect name of policy
+SELECT timescaledb_experimental.remove_policies('max_mat_view_date', false, 'refresh_policy');
+
+SELECT timescaledb_experimental.remove_policies('max_mat_view_date', false, 'policy_refresh_continuous_aggregate', 'policy_compression');
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+
+-- Add one policy
+SELECT timescaledb_experimental.add_policies('max_mat_view_date', refresh_start_offset => '15 day'::interval, refresh_end_offset => '1 day'::interval);
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+
+-- Remove all policies
+SELECT * FROM timescaledb_experimental.policies ORDER BY relation_name, proc_name;
+SELECT timescaledb_experimental.remove_all_policies(NULL); -- should fail
+SELECT timescaledb_experimental.remove_all_policies('continuous_agg_max_mat_date'); -- should fail
+SELECT timescaledb_experimental.remove_all_policies('max_mat_view_date', false);
+SELECT timescaledb_experimental.remove_all_policies('max_mat_view_date', false); -- should fail
+CREATE OR REPLACE FUNCTION custom_func(jobid int, args jsonb) RETURNS RECORD LANGUAGE SQL AS
+$$
+ VALUES($1, $2, 'custom_func');
+$$;
+ -- inject custom job
+SELECT add_job('custom_func','1h', config:='{"type":"function"}'::jsonb, initial_start => '2000-01-01 00:00:00+00'::timestamptz) AS job_id \gset
+SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id, 'max_mat_view_date'::regclass);
+SELECT * FROM timescaledb_information.jobs WHERE job_id != 1 ORDER BY 1;
+SELECT timescaledb_experimental.remove_all_policies('max_mat_view_date', true); -- ignore custom job
+SELECT delete_job(:job_id);
+DROP FUNCTION custom_func;
+SELECT timescaledb_experimental.show_policies('max_mat_view_date');
+
+ALTER materialized view max_mat_view_date set (timescaledb.compress = false);
+
+SELECT add_continuous_aggregate_policy('max_mat_view_date', '2 days', 10, '1 day'::interval);
+--start_interval < end_interval
+SELECT add_continuous_aggregate_policy('max_mat_view_date', '1 day'::interval, '2 days'::interval , '1 day'::interval) ;
+--interval less than two buckets
+SELECT add_continuous_aggregate_policy('max_mat_view_date', '7 days', '1 day', '1 day'::interval);
+SELECT add_continuous_aggregate_policy('max_mat_view_date', '14 days', '1 day', '1 day'::interval);
+SELECT add_continuous_aggregate_policy('max_mat_view_date', '13 days', '-10 hours', '1 day'::interval);
+\set VERBOSITY terse
+\set ON_ERROR_STOP 1
+
+-- Negative start offset gives two bucket window:
+SELECT add_continuous_aggregate_policy('max_mat_view_date', '13 days', '-1 day', '1 day'::interval);
+SELECT remove_continuous_aggregate_policy('max_mat_view_date');
+-- Both offsets NULL:
+SELECT add_continuous_aggregate_policy('max_mat_view_date', NULL, NULL, '1 day'::interval);
+SELECT remove_continuous_aggregate_policy('max_mat_view_date');
+
+SELECT add_continuous_aggregate_policy('max_mat_view_date', '15 days', '1 day', '1 day'::interval) as job_id \gset
+SELECT config FROM _timescaledb_config.bgw_job
+WHERE id = :job_id;
+
+INSERT INTO continuous_agg_max_mat_date
+ SELECT generate_series('2019-09-01'::date, '2019-09-10'::date, '1 day');
+--- to prevent NOTICES set message level to warning
+SET client_min_messages TO warning;
+CALL run_job(:job_id);
+RESET client_min_messages;
+DROP MATERIALIZED VIEW max_mat_view_date;
+
+CREATE TABLE continuous_agg_timestamp(time TIMESTAMP);
+SELECT create_hypertable('continuous_agg_timestamp', 'time');
+
+CREATE MATERIALIZED VIEW max_mat_view_timestamp
+ WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+ AS SELECT time_bucket('7 days', time)
+ FROM continuous_agg_timestamp
+ GROUP BY 1 WITH NO DATA;
+
+--the start offset overflows the smallest time value, but is capped at
+--the min value
+SELECT add_continuous_aggregate_policy('max_mat_view_timestamp', '1000000 years', '1 day' , '1 h'::interval);
+SELECT remove_continuous_aggregate_policy('max_mat_view_timestamp');
+
+\set ON_ERROR_STOP 0
+\set VERBOSITY default
+--start and end offset capped at the lowest time value, which means
+--zero size window
+SELECT add_continuous_aggregate_policy('max_mat_view_timestamp', '1000000 years', '900000 years' , '1 h'::interval);
+SELECT add_continuous_aggregate_policy('max_mat_view_timestamp', '301 days', '10 months' , '1 h'::interval);
+\set VERBOSITY terse
+\set ON_ERROR_STOP 1
+
+SELECT add_continuous_aggregate_policy('max_mat_view_timestamp', '15 days', '1 h'::interval , '1 h'::interval) as job_id \gset
+
+--- to prevent NOTICES set message level to warning
+SET client_min_messages TO warning;
+CALL run_job(:job_id);
+RESET client_min_messages ;
+
+SELECT config FROM _timescaledb_config.bgw_job
+WHERE id = :job_id;
+
+\c :TEST_DBNAME :ROLE_SUPERUSER
+
+SET timezone TO PST8PDT;
+
+UPDATE _timescaledb_config.bgw_job
+SET config = jsonb_build_object('mat_hypertable_id', :mat_id)
+WHERE id = :job_id;
+
+SET ROLE :ROLE_DEFAULT_PERM_USER;
+SELECT config FROM _timescaledb_config.bgw_job where id = :job_id;
+\set ON_ERROR_STOP 0
+SELECT add_continuous_aggregate_policy('max_mat_view_timestamp', '15 day', '1 day', '1h'::interval, if_not_exists=>true);
+SELECT add_continuous_aggregate_policy('max_mat_view_timestamp', 'xyz', '1 day', '1h'::interval, if_not_exists=>true);
+\set ON_ERROR_STOP 1
+
+DROP MATERIALIZED VIEW max_mat_view_timestamp;
+
+--smallint table
+CREATE TABLE smallint_tab (a smallint);
+SELECT table_name FROM create_hypertable('smallint_tab', 'a', chunk_time_interval=> 10);
+CREATE OR REPLACE FUNCTION integer_now_smallint_tab() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a)::smallint, 0::smallint) FROM smallint_tab ; $$;
+SELECT set_integer_now_func('smallint_tab', 'integer_now_smallint_tab');
+
+CREATE MATERIALIZED VIEW mat_smallint( a, countb )
+WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+as
+SELECT time_bucket( SMALLINT '1', a) , count(*)
+FROM smallint_tab
+GROUP BY 1 WITH NO DATA;
+\set ON_ERROR_STOP 0
+\set VERBOSITY default
+
+-- Test 1 step policy for smallint type buckets
+ALTER materialized view mat_smallint set (timescaledb.compress = true);
+
+-- All policies are added in one step
+SELECT timescaledb_experimental.add_policies('mat_smallint', refresh_start_offset => 10::smallint, refresh_end_offset => 1::smallint, compress_after => 11::smallint, drop_after => 20::smallint);
+SELECT timescaledb_experimental.show_policies('mat_smallint');
+
+-- Alter policies
+SELECT timescaledb_experimental.alter_policies('mat_smallint', refresh_start_offset => 11::smallint, compress_after=>13::smallint, drop_after => 25::smallint);
+SELECT timescaledb_experimental.show_policies('mat_smallint');
+
+SELECT timescaledb_experimental.remove_all_policies('mat_smallint', false);
+ALTER materialized view mat_smallint set (timescaledb.compress = false);
+
+SELECT add_continuous_aggregate_policy('mat_smallint', 15, 0 , '1 h'::interval);
+SELECT add_continuous_aggregate_policy('mat_smallint', 98898::smallint , 0::smallint, '1 h'::interval);
+SELECT add_continuous_aggregate_policy('mat_smallint', 5::smallint, 10::smallint , '1 h'::interval) as job_id \gset
+\set VERBOSITY terse
+\set ON_ERROR_STOP 1
+SELECT add_continuous_aggregate_policy('mat_smallint', 15::smallint, 0::smallint , '1 h'::interval) as job_id \gset
+INSERT INTO smallint_tab VALUES(5);
+INSERT INTO smallint_tab VALUES(10);
+INSERT INTO smallint_tab VALUES(20);
+CALL run_job(:job_id);
+SELECT * FROM mat_smallint ORDER BY 1;
+
+--remove all the data--
+TRUNCATE table smallint_tab;
+CALL refresh_continuous_aggregate('mat_smallint', NULL, NULL);
+SELECT * FROM mat_smallint ORDER BY 1;
+
+-- Case 1: overflow by subtracting from PG_INT16_MIN
+--overflow start_interval, end_interval [-32768, -32768)
+SELECT remove_continuous_aggregate_policy('mat_smallint');
+INSERT INTO smallint_tab VALUES( -32768 );
+SELECT integer_now_smallint_tab();
+SELECT add_continuous_aggregate_policy('mat_smallint', 10::smallint, 5::smallint , '1 h'::interval) as job_id \gset
+
+\set ON_ERROR_STOP 0
+CALL run_job(:job_id);
+\set ON_ERROR_STOP 1
+SELECT * FROM mat_smallint ORDER BY 1;
+
+-- overflow start_interval. now this runs as range is capped [-32768, -32765)
+INSERT INTO smallint_tab VALUES( -32760 );
+SELECT maxval, maxval - 10, maxval -5 FROM integer_now_smallint_tab() as maxval;
+CALL run_job(:job_id);
+SELECT * FROM mat_smallint ORDER BY 1;
+
+--remove all the data--
+TRUNCATE table smallint_tab;
+CALL refresh_continuous_aggregate('mat_smallint', NULL, NULL);
+SELECT * FROM mat_smallint ORDER BY 1;
+
+-- Case 2: overflow by subtracting from PG_INT16_MAX
+--overflow start and end . will fail as range is [32767, 32767]
+SELECT remove_continuous_aggregate_policy('mat_smallint');
+INSERT INTO smallint_tab VALUES( 32766 );
+INSERT INTO smallint_tab VALUES( 32767 );
+SELECT maxval, maxval - (-1), maxval - (-2) FROM integer_now_smallint_tab() as maxval;
+SELECT add_continuous_aggregate_policy('mat_smallint', -1::smallint, -3::smallint , '1 h'::interval) as job_id \gset
+\set ON_ERROR_STOP 0
+CALL run_job(:job_id);
+\set ON_ERROR_STOP 1
+SELECT * FROM mat_smallint ORDER BY 1;
+
+SELECT remove_continuous_aggregate_policy('mat_smallint');
+--overflow end . will work range is [32765, 32767)
+SELECT maxval, maxval - (1), maxval - (-2) FROM integer_now_smallint_tab() as maxval;
+SELECT add_continuous_aggregate_policy('mat_smallint', 1::smallint, -3::smallint , '1 h'::interval) as job_id \gset
+\set ON_ERROR_STOP 0
+CALL run_job(:job_id);
+SELECT * FROM mat_smallint ORDER BY 1;
+
+-- tests for interval argument conversions
+--
+\set ON_ERROR_STOP 0
+SELECT add_continuous_aggregate_policy('mat_smallint', 15, 10, '1h'::interval, if_not_exists=>true);
+SELECT add_continuous_aggregate_policy('mat_smallint', '15', 10, '1h'::interval, if_not_exists=>true);
+SELECT add_continuous_aggregate_policy('mat_smallint', '15', '10', '1h'::interval, if_not_exists=>true);
+\set ON_ERROR_STOP 1
+
+
+--bigint table
+CREATE TABLE bigint_tab (a bigint);
+SELECT table_name FROM create_hypertable('bigint_tab', 'a', chunk_time_interval=> 10);
+CREATE OR REPLACE FUNCTION integer_now_bigint_tab() returns bigint LANGUAGE SQL STABLE as $$ SELECT 20::bigint $$;
+SELECT set_integer_now_func('bigint_tab', 'integer_now_bigint_tab');
+
+CREATE MATERIALIZED VIEW mat_bigint( a, countb )
+WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+as
+SELECT time_bucket( BIGINT '1', a) , count(*)
+FROM bigint_tab
+GROUP BY 1 WITH NO DATA;
+
+-- Test 1 step policy for bigint type buckets
+ALTER materialized view mat_bigint set (timescaledb.compress = true);
+
+-- All policies are added in one step
+SELECT timescaledb_experimental.add_policies('mat_bigint', refresh_start_offset => 10::bigint, refresh_end_offset => 1::bigint, compress_after => 11::bigint, drop_after => 20::bigint);
+SELECT timescaledb_experimental.show_policies('mat_bigint');
+
+-- Alter policies
+SELECT timescaledb_experimental.alter_policies('mat_bigint', refresh_start_offset => 11::bigint, compress_after=>13::bigint, drop_after => 25::bigint);
+SELECT timescaledb_experimental.show_policies('mat_bigint');
+
+SELECT timescaledb_experimental.remove_all_policies('mat_bigint', false);
+ALTER materialized view mat_bigint set (timescaledb.compress = false);
+
+\set ON_ERROR_STOP 0
+SELECT add_continuous_aggregate_policy('mat_bigint', 5::bigint, 10::bigint , '1 h'::interval) ;
+\set ON_ERROR_STOP 1
+SELECT add_continuous_aggregate_policy('mat_bigint', 15::bigint, 0::bigint , '1 h'::interval) as job_mid \gset
+INSERT INTO bigint_tab VALUES(5);
+INSERT INTO bigint_tab VALUES(10);
+INSERT INTO bigint_tab VALUES(20);
+CALL run_job(:job_mid);
+SELECT * FROM mat_bigint ORDER BY 1;
+
+-- test NULL for end
+SELECT remove_continuous_aggregate_policy('mat_bigint');
+SELECT add_continuous_aggregate_policy('mat_bigint', 1::smallint, NULL , '1 h'::interval) as job_id \gset
+INSERT INTO bigint_tab VALUES(500);
+CALL run_job(:job_id);
+SELECT * FROM mat_bigint WHERE a>100 ORDER BY 1;
+
+ALTER MATERIALIZED VIEW mat_bigint SET (timescaledb.compress);
+ALTER MATERIALIZED VIEW mat_smallint SET (timescaledb.compress);
+-- With immutable compressed chunks, these policies would fail by overlapping the refresh window
+SELECT add_compression_policy('mat_smallint', -4::smallint);
+SELECT remove_compression_policy('mat_smallint');
+SELECT add_compression_policy('mat_bigint', 0::bigint);
+SELECT remove_compression_policy('mat_bigint');
+-- End previous limitation tests
+
+SELECT add_compression_policy('mat_smallint', 5::smallint);
+SELECT add_compression_policy('mat_bigint', 20::bigint);
+
+-- end of coverage tests
+
+--TEST continuous aggregate + compression policy on caggs
+CREATE TABLE metrics (
+ time timestamptz NOT NULL,
+ device_id int,
+ device_id_peer int,
+ v0 int,
+ v1 int,
+ v2 float,
+ v3 float
+);
+
+SELECT create_hypertable('metrics', 'time');
+
+INSERT INTO metrics (time, device_id, device_id_peer, v0, v1, v2, v3)
+SELECT time,
+ device_id,
+ 0,
+ device_id + 1,
+ device_id + 2,
+ 0.5,
+ NULL
+FROM generate_series('2000-01-01 0:00:00+0'::timestamptz, '2000-01-02 23:55:00+0', '20m') gtime (time),
+ generate_series(1, 2, 1) gdevice (device_id);
+
+ALTER TABLE metrics SET ( timescaledb.compress );
+SELECT compress_chunk(ch) FROM show_chunks('metrics') ch;
+
+CREATE MATERIALIZED VIEW metrics_cagg WITH (timescaledb.continuous,
+ timescaledb.materialized_only = true)
+AS
+SELECT time_bucket('1 day', time) as dayb, device_id,
+ sum(v0), avg(v3)
+FROM metrics
+GROUP BY 1, 2
+WITH NO DATA;
+
+-- this was previously crashing
+SELECT add_continuous_aggregate_policy('metrics_cagg', '7 day'::interval, NULL, '1 h'::interval, if_not_exists => true);
+\set ON_ERROR_STOP 0
+SELECT add_continuous_aggregate_policy('metrics_cagg', '7 day'::interval, '1 day'::interval, '1 h'::interval, if_not_exists => true);
+SELECT remove_continuous_aggregate_policy('metrics_cagg');
+SELECT add_continuous_aggregate_policy('metrics_cagg', NULL, '1 day'::interval, '1h'::interval, if_not_exists=>true);
+SELECT add_continuous_aggregate_policy('metrics_cagg', NULL, '1 day'::interval, '1h'::interval, if_not_exists=>true); -- same param values, so we get a NOTICE
+SELECT add_continuous_aggregate_policy('metrics_cagg', NULL, NULL, '1h'::interval, if_not_exists=>true); -- different values, so we get a WARNING
+SELECT remove_continuous_aggregate_policy('metrics_cagg');
+--can set compression policy only after setting up refresh policy --
+SELECT add_compression_policy('metrics_cagg', '1 day'::interval);
+
+--can set compression policy only after enabling compression --
+SELECT add_continuous_aggregate_policy('metrics_cagg', '7 day'::interval, '1 day'::interval, '1 h'::interval) as "REFRESH_JOB" \gset
+SELECT add_compression_policy('metrics_cagg', '8 day'::interval) AS "COMP_JOB" ;
+ALTER MATERIALIZED VIEW metrics_cagg SET (timescaledb.compress);
+
+--cannot use compress_created_before with cagg
+SELECT add_compression_policy('metrics_cagg', compress_created_before => '8 day'::interval) AS "COMP_JOB" ;
+\set ON_ERROR_STOP 1
+
+
+SELECT add_compression_policy('metrics_cagg', '8 day'::interval) AS "COMP_JOB" ;
+SELECT remove_compression_policy('metrics_cagg');
+SELECT add_compression_policy('metrics_cagg', '8 day'::interval) AS "COMP_JOB" \gset
+
+--verify that jobs were added for the policies ---
+SELECT materialization_hypertable_name AS "MAT_TABLE_NAME",
+ view_name AS "VIEW_NAME"
+FROM timescaledb_information.continuous_aggregates
+WHERE view_name = 'metrics_cagg' \gset
+
+SELECT count(*) FROM timescaledb_information.jobs
+WHERE hypertable_name = :'VIEW_NAME';
+
+--exec the cagg compression job --
+CALL refresh_continuous_aggregate('metrics_cagg', NULL, '2001-02-01 00:00:00+0');
+CALL run_job(:COMP_JOB);
+SELECT count(*), count(*) FILTER ( WHERE is_compressed is TRUE )
+FROM timescaledb_information.chunks
+WHERE hypertable_name = :'MAT_TABLE_NAME' ORDER BY 1;
+
+--add some new data into metrics_cagg so that cagg policy job has something to do
+INSERT INTO metrics (time, device_id, device_id_peer, v0, v1, v2, v3)
+SELECT now() - '5 day'::interval, 102, 0, 10, 10, 10, 10;
+CALL run_job(:REFRESH_JOB);
+--now we have a new chunk and it is not compressed
+SELECT count(*), count(*) FILTER ( WHERE is_compressed is TRUE )
+FROM timescaledb_information.chunks
+WHERE hypertable_name = :'MAT_TABLE_NAME' ORDER BY 1;
+
+--verify that both jobs are dropped when view is dropped
+DROP MATERIALIZED VIEW metrics_cagg;
+
+SELECT count(*) FROM timescaledb_information.jobs
+WHERE hypertable_name = :'VIEW_NAME';
+
+-- add test case for issue 4252
+CREATE TABLE IF NOT EXISTS sensor_data(
+time TIMESTAMPTZ NOT NULL,
+sensor_id INTEGER,
+temperature DOUBLE PRECISION,
+cpu DOUBLE PRECISION);
+
+SELECT create_hypertable('sensor_data','time');
+
+INSERT INTO sensor_data(time, sensor_id, cpu, temperature)
+SELECT
+time,
+sensor_id,
+extract(dow from time) AS cpu,
+extract(doy from time) AS temperature
+FROM
+generate_series('2022-05-05'::timestamp at time zone 'UTC' - interval '6 weeks', '2022-05-05'::timestamp at time zone 'UTC', interval '5 hours') as g1(time),
+generate_series(1,1000,1) as g2(sensor_id);
+
+CREATE materialized view deals_best_weekly
+WITH (timescaledb.continuous) AS
+SELECT
+time_bucket('7 days', "time") AS bucket,
+avg(temperature) AS avg_temp,
+max(cpu) AS max_rating
+FROM sensor_data
+GROUP BY bucket
+WITH NO DATA;
+
+CREATE materialized view deals_best_daily
+WITH (timescaledb.continuous) AS
+SELECT
+time_bucket('1 day', "time") AS bucket,
+avg(temperature) AS avg_temp,
+max(cpu) AS max_rating
+FROM sensor_data
+GROUP BY bucket
+WITH NO DATA;
+
+ALTER materialized view deals_best_weekly set (timescaledb.materialized_only=true);
+ALTER materialized view deals_best_daily set (timescaledb.materialized_only=true);
+
+-- we have data from 6 weeks before to May 5 2022 (Thu)
+CALL refresh_continuous_aggregate('deals_best_weekly', '2022-04-24', '2022-05-03');
+SELECT * FROM deals_best_weekly ORDER BY bucket;
+CALL refresh_continuous_aggregate('deals_best_daily', '2022-04-20', '2022-05-04');
+SELECT * FROM deals_best_daily ORDER BY bucket LIMIT 2;
+-- expect to get an up-to-date notice
+CALL refresh_continuous_aggregate('deals_best_weekly', '2022-04-24', '2022-05-05');
+SELECT * FROM deals_best_weekly ORDER BY bucket;
+
+-- github issue 5907: segfault when creating 1-step policies on cagg
+-- whose underlying hypertable has a retention policy setup
+CREATE TABLE t(a integer NOT NULL, b integer);
+SELECT create_hypertable('t', 'a', chunk_time_interval=> 10);
+
+CREATE OR REPLACE FUNCTION unix_now() returns int LANGUAGE SQL IMMUTABLE as $$ SELECT extract(epoch from now())::INT $$;
+SELECT set_integer_now_func('t', 'unix_now');
+
+SELECT add_retention_policy('t', 20);
+
+CREATE MATERIALIZED VIEW cagg(a, sumb) WITH (timescaledb.continuous)
+AS SELECT time_bucket(1, a), sum(b)
+ FROM t GROUP BY time_bucket(1, a);
+
+SELECT timescaledb_experimental.add_policies('cagg');
+
+-- Issue #6902
+-- Fix timestamp out of range in a refresh policy when setting `end_offset=>NULL`
+-- for a CAgg with variable sized bucket (i.e. using `time_bucket` with timezone)
+CREATE TABLE issue_6902 (
+ ts TIMESTAMPTZ NOT NULL,
+ temperature NUMERIC
+) WITH (
+ timescaledb.hypertable,
+ timescaledb.partition_column='ts',
+ timescaledb.chunk_interval='1 day',
+ timescaledb.compress='off'
+);
+
+INSERT INTO issue_6902
+SELECT t, 1 FROM generate_series(now() - interval '3 hours', now(), interval '1 minute') AS t;
+
+CREATE MATERIALIZED VIEW issue_6902_by_hour
+WITH (timescaledb.continuous) AS
+SELECT
+ time_bucket(INTERVAL '1 hour', ts, 'America/Sao_Paulo') AS bucket, -- using timezone
+ MAX(temperature),
+ MIN(temperature),
+ COUNT(*)
+FROM issue_6902
+GROUP BY 1
+WITH NO DATA;
+
+SELECT add_continuous_aggregate_policy (
+ 'issue_6902_by_hour',
+ start_offset => INTERVAL '3 hours',
+ end_offset => NULL,
+ schedule_interval => INTERVAL '12 hour',
+ initial_start => now() + INTERVAL '12 hour'
+) AS job_id \gset
+
+-- 181 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
+
+-- run again without any change, remain 181 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
+
+-- change existing data
+UPDATE issue_6902
+SET temperature = temperature + 1;
+
+-- run again without any change, remain 181 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
+
+-- insert more data
+INSERT INTO issue_6902
+SELECT t, 1 FROM generate_series(now() - interval '3 hours', now(), interval '1 minute') AS t;
+
+-- run again; should now have 362 rows
+CALL run_job(:job_id);
+SELECT count(*) FROM issue_6902;
|
[Bug]: Continuous aggregates with time_bucket hourly and timezone fail to update
### What type of bug is this?
Incorrect result, Unexpected error
### What subsystems and features are affected?
Continuous aggregate
### What happened?
I have a number of continuous aggregates that use hourly buckets
```
time_bucket(interval '1 hour', hypertable_name.time, 'Asia/Dubai') AS bucket
```
I noticed that I stopped seeing new results after recreating the hypertable. After hours of debugging, I pinned down that the hourly buckets don't refresh when used with a timezone.

### TimescaleDB version affected
2.14.2 and 2.15.0
### PostgreSQL version used
15.6
### What operating system did you use?
Arch Linux and Ubuntu Linux
### What installation method did you use?
Deb/Apt
### What platform did you run on?
On prem/Self-hosted
### Relevant log output and stack trace
```bash
May 09 11:40:20 ahmad-82nd postgres[245522]: 2024-05-09 11:40:20.361 +04 [245522] DEBUG: launching job 1000 "Refresh Continuous Aggregate Policy [1000]"
May 09 11:40:20 ahmad-82nd postgres[246102]: 2024-05-09 11:40:20.368 +04 [246102] DEBUG: extension state changed: unknown to created
May 09 11:40:20 ahmad-82nd postgres[246102]: 2024-05-09 11:40:20.368 +04 [246102] DEBUG: Executing policy_refresh_continuous_aggregate with parameters {"end_offset": null, "start_offset": "3 mons", "mat_hypertable_id": 3}
May 09 11:40:20 ahmad-82nd postgres[246102]: 2024-05-09 11:40:20.369 +04 [246102] DEBUG: rehashing catalog cache id 35 for pg_namespace; 9 tups, 4 buckets
May 09 11:40:20 ahmad-82nd postgres[246102]: 2024-05-09 11:40:20.370 +04 [246102] LOG: job 1000 threw an error
May 09 11:40:20 ahmad-82nd postgres[246102]: 2024-05-09 11:40:20.370 +04 [246102] ERROR: timestamp out of range
May 09 11:40:20 ahmad-82nd postgres[180293]: 2024-05-09 11:40:20.375 +04 [180293] LOG: background worker "Refresh Continuous Aggregate Policy [1000]" (PID 246102) exited with exit code 1
```
### How can we reproduce the bug?
```sql
CREATE TABLE metrics (
sensor_id INTEGER NOT NULL,
value DOUBLE PRECISION NOT NULL,
timestamp TIMESTAMPTZ NOT NULL
);
SELECT create_hypertable('metrics', 'timestamp');
-- insert data into the hyper table
INSERT INTO metrics
SELECT
s.sensor_id,
random()*50 + 10,
timestamp
FROM
generate_series(DATE (now() - interval '6 month'), (now() - interval '5 day'), INTERVAL '1 hours') AS timestamp
CROSS JOIN (SELECT generate_series(1, 200) as sensor_id) as s;
-- hourly continuous aggregate with timezone
CREATE MATERIALIZED VIEW datalake_hourly
WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS
SELECT
time_bucket(INTERVAL '1 hour', metrics.timestamp, 'Asia/Dubai') AS bucket,
sensor_id,
count(*) as count_items
FROM metrics
GROUP BY bucket, sensor_id;
SELECT add_continuous_aggregate_policy(
'datalake_hourly',
start_offset => INTERVAL '3 months',
end_offset => Null,
schedule_interval => INTERVAL '15 seconds'
);
-- insert few more rows
INSERT INTO metrics
SELECT
s.sensor_id,
random()*50 + 10,
timestamp
FROM
generate_series((now() - interval '5 day'), now(), INTERVAL '1 hour') AS timestamp
CROSS JOIN (SELECT generate_series(1, 200) as sensor_id) as s;
-- wait and check the continuous aggregate
SELECT
timescaledb_information.jobs.job_id,
timescaledb_information.jobs.application_name,
timescaledb_information.jobs.initial_start,
timescaledb_information.jobs.next_start,
timescaledb_information.jobs.config,
timescaledb_information.jobs.schedule_interval,
timescaledb_information.jobs.max_runtime ,
timescaledb_information.jobs.max_retries ,
timescaledb_information.jobs.retry_period ,
timescaledb_information.jobs.scheduled ,
timescaledb_information.jobs.fixed_schedule ,
timescaledb_information.jobs.hypertable_name ,
timescaledb_information.continuous_aggregates.view_name ,
last_run_started_at ,
job_status,
last_run_status,
last_successful_finish ,
total_runs,
total_failures ,
total_successes
FROM timescaledb_information.jobs
left join timescaledb_information.job_stats on timescaledb_information.jobs.job_id = timescaledb_information.job_stats.job_id
left join timescaledb_information.continuous_aggregates on timescaledb_information.continuous_aggregates.materialization_hypertable_name = timescaledb_information.jobs.hypertable_name
order by job_id;
```
|
Thanks for the script, it reproduces for me. The error is related to the NULL value of the `end_offset`, so probably as a workaround you can set it to something far in the future but not null.
@fabriziomello should the end point calculation follow the same logic as `cagg_get_time_min`? Currently the null end point is returned as max time and not +infinity even for variable-bucket caggs, so the calculations in `ts_compute_inscribed_bucketed_refresh_window_variable` fail.
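For reference, a minimal sketch of how to confirm the stored policy configuration; the job id `1000` is the one from the log output above and will differ on other systems. With the bug present, the `config` column shows `"end_offset": null`:

```sql
-- Inspect the stored refresh policy for the failing job from the log above
SELECT job_id, config
FROM timescaledb_information.jobs
WHERE job_id = 1000;
```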
Hi @akuzm, thank you for the reply. I believe `end_offset` only accepts an interval, so `'-10 years'` worked for me and refreshed the data correctly from today onwards after creating the job.
However, I believe we should add a warning or improve how NULL is handled here, as it has caused a lot of issues in the past.
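A sketch of that workaround against the reproduction script above; the `datalake_hourly` aggregate and the `'-10 years'` offset are simply the values mentioned in this thread, and any end offset far enough in the future should behave the same:

```sql
-- Drop the policy that was created with end_offset => NULL ...
SELECT remove_continuous_aggregate_policy('datalake_hourly');

-- ... and re-add it with a negative end_offset, i.e. a refresh window
-- that ends far in the future instead of at NULL
SELECT add_continuous_aggregate_policy(
    'datalake_hourly',
    start_offset      => INTERVAL '3 months',
    end_offset        => INTERVAL '-10 years',
    schedule_interval => INTERVAL '15 seconds'
);
```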
Minimal reproducible test case:
```sql
-- Issue #6902
CREATE TABLE issue_6902 (
ts TIMESTAMPTZ NOT NULL,
temperature NUMERIC
) WITH (
timescaledb.hypertable,
timescaledb.partition_column='ts',
timescaledb.chunk_interval='1 day',
timescaledb.compress='off'
);
INSERT INTO issue_6902
SELECT t, 1 FROM generate_series(now() - interval '3 hours', now(), interval '1 minute') AS t;
CREATE MATERIALIZED VIEW issue_6902_by_hour
WITH (timescaledb.continuous) AS
SELECT
time_bucket(INTERVAL '1 hour', ts, 'America/Sao_Paulo') AS bucket, -- using timezone
MAX(temperature),
MIN(temperature),
COUNT(*)
FROM issue_6902
GROUP BY 1
WITH NO DATA;
SELECT add_continuous_aggregate_policy (
'issue_6902_by_hour',
start_offset => INTERVAL '3 hours',
end_offset => NULL,
schedule_interval => INTERVAL '12 hour',
initial_start => now() + INTERVAL '12 hour'
) AS job_id \gset
-- 181 rows
CALL run_job(:job_id);
SELECT count(*) FROM issue_6902;
-- run again FAIL
CALL run_job(:job_id);
```
|
|
[
"https://github.com/timescale/timescaledb/commit/e39f398c62e9d3271ad15179f46aa4985ea54f4a"
] |
2025-08-28T23:08:57Z
|
https://github.com/timescale/timescaledb/tree/bbbebbfa978465bfdf6d0bac418ee4d269ad4da8
|
[
"cmake --build build --parallel ; make -C build install"
] |
[
"PATH=$PATH:/usr/lib/postgresql/17/bin pkill -u postgres postgres || true && rm -rf /tmp/pgdata && chown -R postgres:postgres /testbed/build && su - postgres -c \"PATH=$PATH:/usr/lib/postgresql/17/bin make -C /testbed/build installcheck\" 2>&1 | tee test-output.log"
] |
[
"cat test-output.log"
] |
def parser(log: str) -> dict[str, str]:
import re
results = {}
for line in log.splitlines():
m = re.match(r'(ok|not ok)\s+\d+\s+\+?\s*([^\s]+)', line)
if m:
status_word, test_name = m.groups()
status = "pass" if status_word == "ok" else "fail"
results[test_name] = status
return results
|
[
"cagg_policy"
] |
[
"compression_qualpushdown",
"chunk_column_stats",
"cagg_deprecated_bucket_ng",
"compress_default",
"cagg_refresh_using_trigger",
"cagg_policy_concurrent",
"compression_sorted_merge",
"modify_exclusion",
"ordered_append-17",
"compress_auto_sparse_index",
"vector_agg_groupagg",
"merge_append_partially_compressed",
"vector_agg_functions",
"size_utils_tsl",
"compress_float8_corrupt",
"compression",
"create_table_with",
"vector_agg_param",
"policy_generalization",
"compress_bitmap_scan",
"columnar_scan_cost",
"plan_skip_scan-17",
"cagg_permissions-17",
"cagg_query_using_merge-17",
"bgw_telemetry",
"cagg_utils",
"compression_null_dump_restore",
"merge_chunks",
"transparent_decompression-17",
"cagg_watermark",
"columnstore_aliases",
"compress_dml_copy",
"compression_insert",
"compression_sorted_merge_distinct",
"compressed_copy",
"compression_conflicts",
"compression_nulls_and_defaults",
"foreign_keys_test",
"cagg_refresh_using_merge",
"plan_skip_scan_notnull",
"compression_permissions-17",
"bgw_security",
"jit",
"cagg_errors",
"compression_settings",
"bgw_job_ddl",
"cagg_policy_move",
"merge_compress",
"compress_qualpushdown_saop",
"cagg_planning",
"vectorized_aggregation",
"compression_sequence_num_removal",
"compression_create_compressed_table",
"compressed_detoaster",
"compression_defaults",
"compression_indexcreate",
"plan_skip_scan_dagg",
"transparent_decompression_join_index",
"compress_bloom_sparse",
"-",
"custom_hashagg",
"compression_fks",
"compress_sort_transform",
"decompress_index",
"bgw_policy",
"compression_constraints",
"compression_uuid",
"cagg_api",
"cagg_refresh_using_wal",
"transparent_decompression_ordered_index-17",
"skip_scan_dagg",
"skip_scan",
"cagg_config",
"compress_sparse_config",
"cagg_union_view-17",
"privilege_maintain",
"compression_policy",
"compression_trigger",
"agg_partials_pushdown",
"cagg_query-17",
"compression_allocation"
] |
starryzhang/sweb.eval.x86_64.timescale_1776_timescaledb-8559
|
cilium/tetragon
|
4069
|
cilium__tetragon-4069
|
[
"4056"
] |
67530967429bb42b665a7c7e69cdbc200247ccb4
| "diff --git a/api/v1/README.md b/api/v1/README.md\nindex 6bf750caea6..85596fcf70f 100644\n--- a/api/(...TRUNCATED)
| "diff --git a/pkg/process/podinfo_test.go b/pkg/process/podinfo_test.go\nindex 46dba2c18b0..b06ec51f(...TRUNCATED)
| "Include pod UID in the log\n### Is there an existing issue for this?\n\n- [x] I have searched the e(...TRUNCATED)
| "As discussed in Slack, to implement this you can get inspiration from this https://github.com/ciliu(...TRUNCATED)
| "As discussed in Slack, to implement this you can get inspiration from this https://github.com/ciliu(...TRUNCATED)
|
[
"https://github.com/cilium/tetragon/commit/3d566e06298cffff24f369932ac92e59f3049878"
] |
2025-08-29T05:43:42Z
|
https://github.com/cilium/tetragon/tree/67530967429bb42b665a7c7e69cdbc200247ccb4
|
[
"export PATH=/usr/local/go/bin:$PATH ; make vendor ; make tetragon"
] |
[
"go test ./... -v 2>&1 | tee test-output.log"
] |
[
"cat test-output.log"
] | "def parser(log: str) -> dict[str, str]:\n import re\n results = {}\n pattern = re.compile((...TRUNCATED)
|
[
"TestK8sWatcher_GetPodInfo",
"TestProcessCache"
] | ["TestTracepointFieldParsing","FuzzProtojsonCompatibility/636add9a0930f826","FuzzProtojsonCompatibil(...TRUNCATED)
|
starryzhang/sweb.eval.x86_64.cilium_1776_tetragon-4069
|
quickjs-ng/quickjs
|
1113
|
quickjs-ng__quickjs-1113
|
[
"1112"
] |
3d3b58d8815f0eef8122fad142a65f3bd21d273b
| "diff --git a/quickjs.c b/quickjs.c\nindex 8b9a3b573..01eb10123 100644\n--- a/quickjs.c\n+++ b/quick(...TRUNCATED)
| "diff --git a/tests/test_builtin.js b/tests/test_builtin.js\nindex 3c37f5930..2e86cb4a2 100644\n--- (...TRUNCATED)
| "Cannot extend TypedArray\nThis should work:\n```js\nvar TypedArray = Object.getPrototypeOf(Uint8Arr(...TRUNCATED)
|
[
"https://github.com/quickjs-ng/quickjs/commit/c5e673abf837fced6d324174d653dae7a8584814"
] |
2025-06-24T20:23:51Z
|
https://github.com/quickjs-ng/quickjs/tree/3d3b58d8815f0eef8122fad142a65f3bd21d273b
|
[
"git submodule update --init --checkout --depth 1 ; make"
] | ["./build/run-test262 -vv -m -a -c test262.conf -c test262-fast.conf test262 2>&1 | tee test-output.(...TRUNCATED)
|
[
"cat test-output.log"
] | "def parser(log: str) -> dict[str, str]:\n import re\n results: dict[str, str] = {}\n # Mat(...TRUNCATED)
| ["test262/test/language/expressions/class/dstr/async-gen-meth-dflt-ary-init-iter-get-err.js","test26(...TRUNCATED)
| ["test262/test/built-ins/Array/fromAsync/this-constructor-with-unsettable-element-closes-sync-iterat(...TRUNCATED)
|
starryzhang/sweb.eval.x86_64.quickjs-ng_1776_quickjs-1113
|
||
fluent/fluent-bit
|
10563
|
fluent__fluent-bit-10563
|
[
"10560"
] |
c872957b57b2a8704e3b8cbc7f3994b430f96140
| "diff --git a/plugins/out_loki/loki.c b/plugins/out_loki/loki.c\nindex 2bdcaba3539..ee7a4313a89 1006(...TRUNCATED)
| "diff --git a/tests/runtime/out_loki.c b/tests/runtime/out_loki.c\nindex a440fc0b692..e177b01e675 10(...TRUNCATED)
| "Inconsistent `Remove_Keys` behavior when running Loki output with multiple workers / threads\n## Bu(...TRUNCATED)
|
Wow, that was fast! Thanks!
| ["https://github.com/fluent/fluent-bit/commit/f54cd8193712aed8eb64af38928ed76ef76e619f","https://git(...TRUNCATED)
|
2025-07-09T04:16:04Z
|
https://github.com/fluent/fluent-bit/tree/c872957b57b2a8704e3b8cbc7f3994b430f96140
| ["cmake -S . -B build -G Ninja -DCMAKE_BUILD_TYPE=Release -DFLB_DEV=On -DFLB_TESTS_RUNTIME=On -DFLB_(...TRUNCATED)
|
[
"ctest --test-dir build --output-on-failure | tee test-output.log"
] |
[
"cat test-output.log"
] | "def parser(log: str) -> dict[str, str]:\n import re\n results: dict[str, str] = {}\n # Mat(...TRUNCATED)
|
[
"flb-rt-out_stackdriver"
] | ["flb-rt-out_null","flb-rt-out_stdout","flb-rt-core_accept_timeout","flb-rt-custom_calyptia_input_te(...TRUNCATED)
|
starryzhang/sweb.eval.x86_64.fluent_1776_fluent-bit-10563
|
|
php/php-src
|
19478
|
php__php-src-19478
|
[
"19476"
] |
9c754baa99c795a65f40b94e4183150889895218
| "diff --git a/NEWS b/NEWS\nindex 77c1e32ff3a82..0180f5b3478a6 100644\n--- a/NEWS\n+++ b/NEWS\n@@ -5,(...TRUNCATED)
| "diff --git a/Zend/tests/pipe_operator_reference_context.phpt b/Zend/tests/pipe_operator_reference_c(...TRUNCATED)
| "pipe operator (`|>`) fails to correctly handle returning by reference\n### Description\n\nThere app(...TRUNCATED)
|
[
"https://github.com/php/php-src/commit/784755f5defdcaee7b7265bbf5c2d2c1ec3d2e42"
] |
2025-08-14T11:24:49Z
|
https://github.com/php/php-src/tree/9c754baa99c795a65f40b94e4183150889895218
| ["apt-get install -y pkg-config build-essential autoconf bison re2c libxml2-dev libsqlite3-dev ; ./c(...TRUNCATED)
|
[
"make test 2>&1 | tee test-output.log"
] |
[
"cat test-output.log"
] | "def parser(log: str) -> dict[str, str]:\n import re\n results = {}\n # Match lines like: T(...TRUNCATED)
|
[
"Zend/tests/pipe_operator_reference_context.phpt"
] | ["Zend/tests/dynamic_prop_deprecation_002.phpt","ext/standard/tests/strings/utf8.phpt","tests/lang/b(...TRUNCATED)
|
starryzhang/sweb.eval.x86_64.php_1776_php-src-19478
|
||
varnishcache/varnish-cache
|
4370
|
varnishcache__varnish-cache-4370
|
[
"4329"
] |
c3b45a3140d340b2ed1a529b41253c2eb4ba70ae
| "diff --git a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c\nindex 8038102(...TRUNCATED)
| "diff --git a/bin/varnishtest/tests/c00055.vtc b/bin/varnishtest/tests/c00055.vtc\nindex 0484219671.(...TRUNCATED)
| "Bad chunked encoding should cause 400 error, not 503\n### Expected Behavior\n\nBad chunked encoding(...TRUNCATED)
|
bugwash agrees.
|
bugwash agrees.
| ["https://github.com/varnishcache/varnish-cache/commit/dbdf05e0b7ec5cb7d4f7f8e991ebc670e011c257","ht(...TRUNCATED)
|
2025-07-21T12:26:30Z
|
https://github.com/varnishcache/varnish-cache/tree/c3b45a3140d340b2ed1a529b41253c2eb4ba70ae
|
[
"./configure ; make -j$(nproc)"
] |
[
"make check VERBOSE=1 2>&1 | tee test-output.log"
] |
[
"cat test-output.log"
] | "def parser(log: str) -> dict[str, str]:\n import re\n results = {}\n for line in log.split(...TRUNCATED)
|
[
"tests/r02722.vtc",
"tests/f00001.vtc",
"tests/c00067.vtc",
"tests/f00016.vtc",
"tests/c00055.vtc"
] | ["vtest2/tests/a00002.vtc","vtest2/tests/a02016.vtc","tests/b00073.vtc","tests/r00502.vtc","tests/r0(...TRUNCATED)
|
starryzhang/sweb.eval.x86_64.varnishcache_1776_varnish-cache-4370
|
rsyslog/rsyslog
|
6047
|
rsyslog__rsyslog-6047
|
[
"2424"
] |
4b3ce60b76362c16f0bece97bafc57fe4aaa3922
| "diff --git a/grammar/rainerscript.c b/grammar/rainerscript.c\nindex 82638305f4..b435b393f3 100644\n(...TRUNCATED)
| "diff --git a/tests/rscript_random_warning.sh b/tests/rscript_random_warning.sh\nnew file mode 10075(...TRUNCATED)
| "rainerscript: random() function does not provide user-visible warning when passed an unsupported va(...TRUNCATED)
|
[
"https://github.com/rsyslog/rsyslog/commit/2f62e6e76b961ae755d4103d60c010af42eedd7b"
] |
2025-08-28T10:26:55Z
|
https://github.com/rsyslog/rsyslog/tree/4b3ce60b76362c16f0bece97bafc57fe4aaa3922
| ["autoreconf -fvi ; ./configure --enable-imfile --enable-mysql --enable-usertools --enable-pgsql --e(...TRUNCATED)
| ["./configure --enable-testbench --enable-imdiag --enable-omstdout && make -j$(nproc) && make check (...TRUNCATED)
|
[
"cat test-output.log"
] | "def parser(log: str) -> dict[str, str]:\n import re\n results = {}\n for line in log.split(...TRUNCATED)
|
[
"omfwd-lb-1target-retry-full_buf.sh"
] | ["rscript_substring.sh","rscript_set_modify.sh","imuxsock_logger_err.sh","func-substring-invld-start(...TRUNCATED)
|
starryzhang/sweb.eval.x86_64.rsyslog_1776_rsyslog-6047
|
||
aws/s2n-tls
|
5481
|
aws__s2n-tls-5481
|
[
"5477"
] |
795f4dc65335b56dfb356d6eaac9429fbae5b75b
| "diff --git a/tls/s2n_auth_selection.c b/tls/s2n_auth_selection.c\nindex 5678f4ae82a..6971f9495e3 10(...TRUNCATED)
| "diff --git a/tests/integrationv2/common.py b/tests/integrationv2/common.py\nindex 5b526a27fbd..2667(...TRUNCATED)
| "s2n-tls server reports signature scheme with RSA kex\n### Security issue notifications\n\nIf you di(...TRUNCATED)
| ["https://github.com/aws/s2n-tls/commit/4f5133dc6c56030fd2b9691f75fd5e7f6259df09","https://github.co(...TRUNCATED)
|
2025-08-25T16:58:39Z
|
https://github.com/aws/s2n-tls/tree/795f4dc65335b56dfb356d6eaac9429fbae5b75b
| ["apt-get install -y libssl-dev ; cmake -S . -B build -G Ninja -DCMAKE_BUILD_TYPE=Release ; cmake --(...TRUNCATED)
|
[
"ctest --test-dir build --verbose 2>&1 | tee test-output.log"
] |
[
"cat test-output.log"
] | "def parser(log: str) -> dict[str, str]:\n import re\n results: dict[str, str] = {}\n # Mat(...TRUNCATED)
|
[
"s2n_signature_algorithms_test",
"s2n_handshake_test"
] | ["s2n_client_signature_algorithms_extension_test","s2n_tls13_server_cert_test","s2n_client_session_t(...TRUNCATED)
|
starryzhang/sweb.eval.x86_64.aws_1776_s2n-tls-5481
|
This is the multi-language version of swebench-Live.
We crawled GitHub issues as SWE tasks for coding agents to solve.
We used RepoLaunch to set up Docker images that serve as the problem-solving sandbox for each instance.
Each split is the task set for one language.
Each split is filtered by prompting an LLM: an instance is discarded if the LLM cannot infer the test patch from the problem statement, or if the problem statement already contains the solution (in natural language or code).