-- directory paths and dlsuffix are passed to us in environment variables
\getenv libdir PG_LIBDIR
\getenv dlsuffix PG_DLSUFFIX

-- Build the full path of the regress shared library, e.g.
-- <libdir>/regress<dlsuffix>; used below by CREATE FUNCTION ... AS :'regresslib'.
\set regresslib :libdir '/regress' :dlsuffix
-- Function to assist with verifying EXPLAIN which includes costs. A series
-- of bool flags allows control over which portions are masked out
--
--   query        - the SQL statement to EXPLAIN
--   do_analyze   - when true, run EXPLAIN (ANALYZE ...) so actual rows appear
--   hide_costs   - mask "cost=X..Y" as "cost=N..N"
--   hide_row_est - mask the first "rows=X" (the estimate) as "rows=N"
--   hide_width   - mask "width=X" as "width=N"
CREATE FUNCTION explain_mask_costs(query text, do_analyze bool,
    hide_costs bool, hide_row_est bool, hide_width bool) RETURNS setof text
LANGUAGE plpgsql AS
$$
DECLARE
    ln text;
    analyze_str text;
BEGIN
    IF do_analyze = true THEN
        analyze_str := 'on';
    ELSE
        analyze_str := 'off';
    END IF;

    -- avoid jit related output by disabling it
    SET LOCAL jit = 0;

    FOR ln IN
        EXECUTE format('explain (analyze %s, costs on, summary off, timing off, buffers off) %s',
            analyze_str, query)
    LOOP
        IF hide_costs = true THEN
            ln := regexp_replace(ln, 'cost=\d+\.\d\d\.\.\d+\.\d\d', 'cost=N..N');
        END IF;

        IF hide_row_est = true THEN
            -- don't use 'g' so that we leave the actual rows intact
            ln := regexp_replace(ln, 'rows=\d+', 'rows=N');
        END IF;

        IF hide_width = true THEN
            ln := regexp_replace(ln, 'width=\d+', 'width=N');
        END IF;

        RETURN NEXT ln;
    END LOOP;
END;
$$;
--
-- num_nulls()
--

SELECT num_nonnulls(NULL);
SELECT num_nonnulls('1');
SELECT num_nonnulls(NULL::text);
SELECT num_nonnulls(NULL::text, NULL::int);
SELECT num_nonnulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL);
SELECT num_nonnulls(VARIADIC '{1,2,NULL,3}'::int[]);
SELECT num_nonnulls(VARIADIC '{"1","2","3","4"}'::text[]);
SELECT num_nonnulls(VARIADIC ARRAY(SELECT CASE WHEN i <> 40 THEN i END FROM generate_series(1, 100) i));

SELECT num_nulls(NULL);
SELECT num_nulls('1');
SELECT num_nulls(NULL::text);
SELECT num_nulls(NULL::text, NULL::int);
SELECT num_nulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL);
SELECT num_nulls(VARIADIC '{1,2,NULL,3}'::int[]);
SELECT num_nulls(VARIADIC '{"1","2","3","4"}'::text[]);
SELECT num_nulls(VARIADIC ARRAY(SELECT CASE WHEN i <> 40 THEN i END FROM generate_series(1, 100) i));

-- special cases
SELECT num_nonnulls(VARIADIC NULL::text[]);
SELECT num_nonnulls(VARIADIC '{}'::int[]);
SELECT num_nulls(VARIADIC NULL::text[]);
SELECT num_nulls(VARIADIC '{}'::int[]);

-- should fail, one or more arguments is required
SELECT num_nonnulls();
SELECT num_nulls();
--
-- canonicalize_path()
--

-- C-language test wrapper from the regress shared library; exposes the
-- backend's canonicalize_path() so the cleanup rules can be checked from SQL.
CREATE FUNCTION test_canonicalize_path(text)
   RETURNS text
   AS :'regresslib'
   LANGUAGE C STRICT IMMUTABLE;

-- absolute paths: "." and ".." should always be removable
SELECT test_canonicalize_path('/');
SELECT test_canonicalize_path('/./abc/def/');
SELECT test_canonicalize_path('/./../abc/def');
SELECT test_canonicalize_path('/./../../abc/def/');
SELECT test_canonicalize_path('/abc/.././def/ghi');
SELECT test_canonicalize_path('/abc/./../def/ghi//');
SELECT test_canonicalize_path('/abc/def/../..');
SELECT test_canonicalize_path('/abc/def/../../..');
SELECT test_canonicalize_path('/abc/def/../../../../ghi/jkl');
-- relative paths: leading ".." components must be preserved
SELECT test_canonicalize_path('.');
SELECT test_canonicalize_path('./');
SELECT test_canonicalize_path('./abc/..');
SELECT test_canonicalize_path('abc/../');
SELECT test_canonicalize_path('abc/../def');
SELECT test_canonicalize_path('..');
SELECT test_canonicalize_path('../abc/def');
SELECT test_canonicalize_path('../abc/..');
SELECT test_canonicalize_path('../abc/../def');
SELECT test_canonicalize_path('../abc/../../def/ghi');
SELECT test_canonicalize_path('./abc/./def/.');
SELECT test_canonicalize_path('./abc/././def/.');
SELECT test_canonicalize_path('./abc/./def/.././ghi/../../../jkl/mno');
|
Add function to log the memory contexts of specified backend process.
Commit 3e98c0bafb added pg_backend_memory_contexts view to display
the memory contexts of the backend process. However its target process
is limited to the backend that is accessing to the view. So this is
not so convenient when investigating the local memory bloat of other
backend process. To improve this situation, this commit adds
pg_log_backend_memory_contexts() function that requests to log
the memory contexts of the specified backend process.
This information can be also collected by calling
MemoryContextStats(TopMemoryContext) via a debugger. But
this technique cannot be used in some environments because no debugger
is available there. So, pg_log_backend_memory_contexts() allows us to
see the memory contexts of specified backend more easily.
Only superusers are allowed to request to log the memory contexts
because allowing any users to issue this request at an unbounded rate
would cause lots of log messages and which can lead to denial of service.
On receipt of the request, at the next CHECK_FOR_INTERRUPTS(),
the target backend logs its memory contexts at LOG_SERVER_ONLY level,
so that these memory contexts will appear in the server log but not
be sent to the client. It logs one message per memory context.
Because if it buffers all memory contexts into StringInfo to log them
as one message, which may require the buffer to be enlarged very much
and lead to OOM error since there can be a large number of memory
contexts in a backend.
When a backend process is consuming huge memory, logging all its
memory contexts might overrun available disk space. To prevent this,
now this patch limits the number of child contexts to log per parent
to 100. As with MemoryContextStats(), it supposes that practical cases
where the log gets long will typically be huge numbers of siblings
under the same parent context; while the additional debugging value
from seeing details about individual siblings beyond 100 will not be large.
There was another proposed patch to add the function to return
the memory contexts of specified backend as the result sets,
instead of logging them, in the discussion. However that patch is
not included in this commit because it had several issues to address.
Thanks to Tatsuhito Kasahara, Andres Freund, Tom Lane, Tomas Vondra,
Michael Paquier, Kyotaro Horiguchi and Zhihong Yu for the discussion.
Bump catalog version.
Author: Atsushi Torikoshi
Reviewed-by: Kyotaro Horiguchi, Zhihong Yu, Fujii Masao
Discussion: https://postgr.es/m/0271f440ac77f2a4180e0e56ebd944d1@oss.nttdata.com
2021-04-06 00:44:15 -04:00
|
|
|
--
|
|
|
|
|
-- pg_log_backend_memory_contexts()
|
|
|
|
|
--
|
|
|
|
|
-- Memory contexts are logged and they are not returned to the function.
|
|
|
|
|
-- Furthermore, their contents can vary depending on the timing. However,
|
2021-10-26 16:13:52 -04:00
|
|
|
-- we can at least verify that the code doesn't fail, and that the
|
|
|
|
|
-- permissions are set properly.
|
Add function to log the memory contexts of specified backend process.
Commit 3e98c0bafb added pg_backend_memory_contexts view to display
the memory contexts of the backend process. However its target process
is limited to the backend that is accessing to the view. So this is
not so convenient when investigating the local memory bloat of other
backend process. To improve this situation, this commit adds
pg_log_backend_memory_contexts() function that requests to log
the memory contexts of the specified backend process.
This information can be also collected by calling
MemoryContextStats(TopMemoryContext) via a debugger. But
this technique cannot be used in some environments because no debugger
is available there. So, pg_log_backend_memory_contexts() allows us to
see the memory contexts of specified backend more easily.
Only superusers are allowed to request to log the memory contexts
because allowing any users to issue this request at an unbounded rate
would cause lots of log messages and which can lead to denial of service.
On receipt of the request, at the next CHECK_FOR_INTERRUPTS(),
the target backend logs its memory contexts at LOG_SERVER_ONLY level,
so that these memory contexts will appear in the server log but not
be sent to the client. It logs one message per memory context.
Because if it buffers all memory contexts into StringInfo to log them
as one message, which may require the buffer to be enlarged very much
and lead to OOM error since there can be a large number of memory
contexts in a backend.
When a backend process is consuming huge memory, logging all its
memory contexts might overrun available disk space. To prevent this,
now this patch limits the number of child contexts to log per parent
to 100. As with MemoryContextStats(), it supposes that practical cases
where the log gets long will typically be huge numbers of siblings
under the same parent context; while the additional debugging value
from seeing details about individual siblings beyond 100 will not be large.
There was another proposed patch to add the function to return
the memory contexts of specified backend as the result sets,
instead of logging them, in the discussion. However that patch is
not included in this commit because it had several issues to address.
Thanks to Tatsuhito Kasahara, Andres Freund, Tom Lane, Tomas Vondra,
Michael Paquier, Kyotaro Horiguchi and Zhihong Yu for the discussion.
Bump catalog version.
Author: Atsushi Torikoshi
Reviewed-by: Kyotaro Horiguchi, Zhihong Yu, Fujii Masao
Discussion: https://postgr.es/m/0271f440ac77f2a4180e0e56ebd944d1@oss.nttdata.com
2021-04-06 00:44:15 -04:00
|
|
|
--
|
2021-10-26 16:13:52 -04:00
|
|
|
|
|
|
|
|
SELECT pg_log_backend_memory_contexts(pg_backend_pid());
|
|
|
|
|
|
2022-01-11 09:19:59 -05:00
|
|
|
SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity
|
|
|
|
|
WHERE backend_type = 'checkpointer';
|
|
|
|
|
|
2021-10-26 16:13:52 -04:00
|
|
|
CREATE ROLE regress_log_memory;
|
|
|
|
|
|
|
|
|
|
SELECT has_function_privilege('regress_log_memory',
|
|
|
|
|
'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- no
|
|
|
|
|
|
|
|
|
|
GRANT EXECUTE ON FUNCTION pg_log_backend_memory_contexts(integer)
|
|
|
|
|
TO regress_log_memory;
|
|
|
|
|
|
|
|
|
|
SELECT has_function_privilege('regress_log_memory',
|
|
|
|
|
'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- yes
|
|
|
|
|
|
|
|
|
|
SET ROLE regress_log_memory;
|
|
|
|
|
SELECT pg_log_backend_memory_contexts(pg_backend_pid());
|
|
|
|
|
RESET ROLE;
|
|
|
|
|
|
|
|
|
|
REVOKE EXECUTE ON FUNCTION pg_log_backend_memory_contexts(integer)
|
|
|
|
|
FROM regress_log_memory;
|
|
|
|
|
|
|
|
|
|
DROP ROLE regress_log_memory;
|
Add function to log the memory contexts of specified backend process.
Commit 3e98c0bafb added pg_backend_memory_contexts view to display
the memory contexts of the backend process. However its target process
is limited to the backend that is accessing to the view. So this is
not so convenient when investigating the local memory bloat of other
backend process. To improve this situation, this commit adds
pg_log_backend_memory_contexts() function that requests to log
the memory contexts of the specified backend process.
This information can be also collected by calling
MemoryContextStats(TopMemoryContext) via a debugger. But
this technique cannot be used in some environments because no debugger
is available there. So, pg_log_backend_memory_contexts() allows us to
see the memory contexts of specified backend more easily.
Only superusers are allowed to request to log the memory contexts
because allowing any users to issue this request at an unbounded rate
would cause lots of log messages and which can lead to denial of service.
On receipt of the request, at the next CHECK_FOR_INTERRUPTS(),
the target backend logs its memory contexts at LOG_SERVER_ONLY level,
so that these memory contexts will appear in the server log but not
be sent to the client. It logs one message per memory context.
Because if it buffers all memory contexts into StringInfo to log them
as one message, which may require the buffer to be enlarged very much
and lead to OOM error since there can be a large number of memory
contexts in a backend.
When a backend process is consuming huge memory, logging all its
memory contexts might overrun available disk space. To prevent this,
now this patch limits the number of child contexts to log per parent
to 100. As with MemoryContextStats(), it supposes that practical cases
where the log gets long will typically be huge numbers of siblings
under the same parent context; while the additional debugging value
from seeing details about individual siblings beyond 100 will not be large.
There was another proposed patch to add the function to return
the memory contexts of specified backend as the result sets,
instead of logging them, in the discussion. However that patch is
not included in this commit because it had several issues to address.
Thanks to Tatsuhito Kasahara, Andres Freund, Tom Lane, Tomas Vondra,
Michael Paquier, Kyotaro Horiguchi and Zhihong Yu for the discussion.
Bump catalog version.
Author: Atsushi Torikoshi
Reviewed-by: Kyotaro Horiguchi, Zhihong Yu, Fujii Masao
Discussion: https://postgr.es/m/0271f440ac77f2a4180e0e56ebd944d1@oss.nttdata.com
2021-04-06 00:44:15 -04:00
--
-- Test some built-in SRFs
--
-- The outputs of these are variable, so we can't just print their results
-- directly, but we can at least verify that the code doesn't fail.
--

-- stash the WAL segment size into the psql variable :segsize
select setting as segsize
from pg_settings where name = 'wal_segment_size'
\gset

select count(*) > 0 as ok from pg_ls_waldir();
-- Test ProjectSet as well as FunctionScan
select count(*) > 0 as ok from (select pg_ls_waldir()) ss;
-- Test not-run-to-completion cases.
select * from pg_ls_waldir() limit 0;
select count(*) > 0 as ok from (select * from pg_ls_waldir() limit 1) ss;
-- a WAL file name is 24 hex digits; its size should equal the segment size
select (w).size = :segsize as ok
from (select pg_ls_waldir() w) ss where length((w).name) = 24 limit 1;

select count(*) >= 0 as ok from pg_ls_archive_statusdir();
select count(*) >= 0 as ok from pg_ls_summariesdir();
|
|
|
|
-- pg_read_file()
select length(pg_read_file('postmaster.pid')) > 20;
select length(pg_read_file('postmaster.pid', 1, 20));
-- Test missing_ok
select pg_read_file('does not exist'); -- error
select pg_read_file('does not exist', true) IS NULL; -- ok
-- Test invalid argument
select pg_read_file('does not exist', 0, -1); -- error
select pg_read_file('does not exist', 0, -1, true); -- error

-- pg_read_binary_file()
select length(pg_read_binary_file('postmaster.pid')) > 20;
select length(pg_read_binary_file('postmaster.pid', 1, 20));
-- Test missing_ok
select pg_read_binary_file('does not exist'); -- error
select pg_read_binary_file('does not exist', true) IS NULL; -- ok
-- Test invalid argument
select pg_read_binary_file('does not exist', 0, -1); -- error
select pg_read_binary_file('does not exist', 0, -1, true); -- error

-- pg_stat_file()
select size > 20, isdir from pg_stat_file('postmaster.pid');
-- pg_ls_dir()
select * from (select pg_ls_dir('.') a) a where a = 'base' limit 1;
-- Test missing_ok (second argument)
select pg_ls_dir('does not exist', false, false); -- error
select pg_ls_dir('does not exist', true, false); -- ok
-- Test include_dot_dirs (third argument)
select count(*) = 1 as dot_found
  from pg_ls_dir('.', false, true) as ls where ls = '.';
select count(*) = 1 as dot_found
  from pg_ls_dir('.', false, false) as ls where ls = '.';

-- pg_timezone_names()
select * from (select (pg_timezone_names()).name) ptn where name='UTC' limit 1;

-- pg_tablespace_databases()
select count(*) > 0 from
  (select pg_tablespace_databases(oid) as pts from pg_tablespace
   where spcname = 'pg_default') pts
  join pg_database db on pts.pts = db.oid;
--
-- Test replication slot directory functions
--
CREATE ROLE regress_slot_dir_funcs;
-- Not available by default.
SELECT has_function_privilege('regress_slot_dir_funcs',
  'pg_ls_logicalsnapdir()', 'EXECUTE');
SELECT has_function_privilege('regress_slot_dir_funcs',
  'pg_ls_logicalmapdir()', 'EXECUTE');
SELECT has_function_privilege('regress_slot_dir_funcs',
  'pg_ls_replslotdir(text)', 'EXECUTE');
GRANT pg_monitor TO regress_slot_dir_funcs;
-- Role is now part of pg_monitor, so these are available.
SELECT has_function_privilege('regress_slot_dir_funcs',
  'pg_ls_logicalsnapdir()', 'EXECUTE');
SELECT has_function_privilege('regress_slot_dir_funcs',
  'pg_ls_logicalmapdir()', 'EXECUTE');
SELECT has_function_privilege('regress_slot_dir_funcs',
  'pg_ls_replslotdir(text)', 'EXECUTE');
DROP ROLE regress_slot_dir_funcs;
--
-- Test adding a support function to a subject function
--

-- thin SQL-callable wrapper over the built-in int4eq
CREATE FUNCTION my_int_eq(int, int) RETURNS bool
  LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE
  AS $$int4eq$$;

-- By default, planner does not think that's selective
EXPLAIN (COSTS OFF)
SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1
WHERE my_int_eq(a.unique2, 42);

-- With support function that knows it's int4eq, we get a different plan
CREATE FUNCTION test_support_func(internal)
    RETURNS internal
    AS :'regresslib', 'test_support_func'
    LANGUAGE C STRICT;

ALTER FUNCTION my_int_eq(int, int) SUPPORT test_support_func;

EXPLAIN (COSTS OFF)
SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1
WHERE my_int_eq(a.unique2, 42);

-- Also test non-default rowcount estimate
CREATE FUNCTION my_gen_series(int, int) RETURNS SETOF integer
  LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE
  AS $$generate_series_int4$$
  SUPPORT test_support_func;

EXPLAIN (COSTS OFF)
SELECT * FROM tenk1 a JOIN my_gen_series(1,1000) g ON a.unique1 = g;

EXPLAIN (COSTS OFF)
SELECT * FROM tenk1 a JOIN my_gen_series(1,10) g ON a.unique1 = g;
--
-- Test the SupportRequestRows support function for generate_series_timestamp()
--

-- Ensure the row estimate matches the actual rows
SELECT explain_mask_costs($$
SELECT * FROM generate_series(TIMESTAMPTZ '2024-02-01', TIMESTAMPTZ '2024-03-01', INTERVAL '1 day') g(s);$$,
true, true, false, true);

-- As above but with generate_series_timestamp
SELECT explain_mask_costs($$
SELECT * FROM generate_series(TIMESTAMP '2024-02-01', TIMESTAMP '2024-03-01', INTERVAL '1 day') g(s);$$,
true, true, false, true);

-- As above but with generate_series_timestamptz_at_zone()
SELECT explain_mask_costs($$
SELECT * FROM generate_series(TIMESTAMPTZ '2024-02-01', TIMESTAMPTZ '2024-03-01', INTERVAL '1 day', 'UTC') g(s);$$,
true, true, false, true);

-- Ensure the estimated and actual row counts match when the range isn't
-- evenly divisible by the step
SELECT explain_mask_costs($$
SELECT * FROM generate_series(TIMESTAMPTZ '2024-02-01', TIMESTAMPTZ '2024-03-01', INTERVAL '7 day') g(s);$$,
true, true, false, true);

-- Ensure the estimates match when step is decreasing
SELECT explain_mask_costs($$
SELECT * FROM generate_series(TIMESTAMPTZ '2024-03-01', TIMESTAMPTZ '2024-02-01', INTERVAL '-1 day') g(s);$$,
true, true, false, true);

-- Ensure an empty range estimates 1 row
SELECT explain_mask_costs($$
SELECT * FROM generate_series(TIMESTAMPTZ '2024-03-01', TIMESTAMPTZ '2024-02-01', INTERVAL '1 day') g(s);$$,
true, true, false, true);

-- Ensure we get the default row estimate for infinity values
SELECT explain_mask_costs($$
SELECT * FROM generate_series(TIMESTAMPTZ '-infinity', TIMESTAMPTZ 'infinity', INTERVAL '1 day') g(s);$$,
false, true, false, true);

-- Ensure the row estimate behaves correctly when step size is zero.
-- We expect generate_series_timestamp() to throw the error rather than in
-- the support function.
SELECT * FROM generate_series(TIMESTAMPTZ '2024-02-01', TIMESTAMPTZ '2024-03-01', INTERVAL '0 day') g(s);
--
-- Test the SupportRequestRows support function for generate_series_numeric()
--

-- Ensure the row estimate matches the actual rows
SELECT explain_mask_costs($$
SELECT * FROM generate_series(1.0, 25.0) g(s);$$,
true, true, false, true);

-- As above but with non-default step
SELECT explain_mask_costs($$
SELECT * FROM generate_series(1.0, 25.0, 2.0) g(s);$$,
true, true, false, true);

-- Ensure the estimates match when step is decreasing
SELECT explain_mask_costs($$
SELECT * FROM generate_series(25.0, 1.0, -1.0) g(s);$$,
true, true, false, true);

-- Ensure an empty range estimates 1 row
SELECT explain_mask_costs($$
SELECT * FROM generate_series(25.0, 1.0, 1.0) g(s);$$,
true, true, false, true);

-- Ensure we get the default row estimate for error cases (infinity/NaN values
-- and zero step size)
SELECT explain_mask_costs($$
SELECT * FROM generate_series('-infinity'::NUMERIC, 'infinity'::NUMERIC, 1.0) g(s);$$,
false, true, false, true);

SELECT explain_mask_costs($$
SELECT * FROM generate_series(1.0, 25.0, 'NaN'::NUMERIC) g(s);$$,
false, true, false, true);

SELECT explain_mask_costs($$
SELECT * FROM generate_series(25.0, 2.0, 0.0) g(s);$$,
false, true, false, true);
-- Test functions for control data
SELECT count(*) > 0 AS ok FROM pg_control_checkpoint();
SELECT count(*) > 0 AS ok FROM pg_control_init();
SELECT count(*) > 0 AS ok FROM pg_control_recovery();
SELECT count(*) > 0 AS ok FROM pg_control_system();
-- pg_split_walfile_name, pg_walfile_name & pg_walfile_name_offset
SELECT * FROM pg_split_walfile_name(NULL);
SELECT * FROM pg_split_walfile_name('invalid');
SELECT segment_number > 0 AS ok_segment_number, timeline_id
  FROM pg_split_walfile_name('000000010000000100000000');
SELECT segment_number > 0 AS ok_segment_number, timeline_id
  FROM pg_split_walfile_name('ffffffFF00000001000000af');

-- stash the WAL segment size (as int8) into the psql variable :segment_size
SELECT setting::int8 AS segment_size
  FROM pg_settings
  WHERE name = 'wal_segment_size'
\gset

-- round-trip checks: walfile_name_offset output fed back through
-- pg_split_walfile_name at the segment boundary and one byte either side
SELECT segment_number, file_offset
  FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size),
       pg_split_walfile_name(file_name);
SELECT segment_number, file_offset
  FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size + 1),
       pg_split_walfile_name(file_name);
SELECT segment_number, file_offset = :segment_size - 1
  FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size - 1),
       pg_split_walfile_name(file_name);
-- pg_current_logfile
CREATE ROLE regress_current_logfile;
-- not available by default
SELECT has_function_privilege('regress_current_logfile',
  'pg_current_logfile()', 'EXECUTE');
GRANT pg_monitor TO regress_current_logfile;
-- role has privileges of pg_monitor and can execute the function
SELECT has_function_privilege('regress_current_logfile',
  'pg_current_logfile()', 'EXECUTE');
DROP ROLE regress_current_logfile;
-- pg_column_toast_chunk_id
-- STORAGE EXTERNAL forces b out-of-line so it gets a TOAST chunk id;
-- the short value in a stays inline and yields NULL.
CREATE TABLE test_chunk_id (a TEXT, b TEXT STORAGE EXTERNAL);
INSERT INTO test_chunk_id VALUES ('x', repeat('x', 8192));
-- stash the table's toast relation name into the psql variable :toastrel
SELECT t.relname AS toastrel FROM pg_class c
  LEFT JOIN pg_class t ON c.reltoastrelid = t.oid
  WHERE c.relname = 'test_chunk_id'
\gset
SELECT pg_column_toast_chunk_id(a) IS NULL,
       pg_column_toast_chunk_id(b) IN (SELECT chunk_id FROM pg_toast.:toastrel)
  FROM test_chunk_id;
DROP TABLE test_chunk_id;

DROP FUNCTION explain_mask_costs(text, bool, bool, bool, bool);
-- test stratnum translation support functions
SELECT gist_translate_cmptype_common(7);
SELECT gist_translate_cmptype_common(3);
-- relpath tests
-- C-language test function from the regress shared library
CREATE FUNCTION test_relpath()
    RETURNS void
    AS :'regresslib'
    LANGUAGE C;
SELECT test_relpath();

-- pg_replication_origin.roname limit
SELECT pg_replication_origin_create('regress_' || repeat('a', 505));