Implement Incremental Sort
Incremental Sort is an optimized variant of multikey sort for cases when
the input is already sorted by a prefix of the requested sort keys. For
example when the relation is already sorted by (key1, key2) and we need
to sort it by (key1, key2, key3) we can simply split the input rows into
groups having equal values in (key1, key2), and only sort/compare the
remaining column key3.
This has a number of benefits:
- Reduced memory consumption, because only a single group (determined by
values in the sorted prefix) needs to be kept in memory. This may also
eliminate the need to spill to disk.
- Lower startup cost, because Incremental Sort produces results after each
prefix group, which is beneficial for plans where startup cost matters
(like for example queries with LIMIT clause).
We consider both Sort and Incremental Sort, and decide based on costing.
The implemented algorithm operates in two different modes:
- Fetching a minimum number of tuples without check of equality on the
prefix keys, and sorting on all columns when safe.
- Fetching all tuples for a single prefix group and then sorting by
comparing only the remaining (non-prefix) keys.
We always start in the first mode, and employ a heuristic to switch into
the second mode if we believe it's beneficial - the goal is to minimize
the number of unnecessary comparisons while keeping memory consumption
below work_mem.
This is a very old patch series. The idea was originally proposed by
Alexander Korotkov back in 2013, and then revived in 2017. In 2018 the
patch was taken over by James Coleman, who wrote and rewrote most of the
current code.
There were many reviewers/contributors since 2013 - I've done my best to
pick the most active ones, and listed them in this commit message.
Author: James Coleman, Alexander Korotkov
Reviewed-by: Tomas Vondra, Andreas Karlsson, Marti Raudsepp, Peter Geoghegan, Robert Haas, Thomas Munro, Antonin Houska, Andres Freund, Alexander Kuzmenkov
Discussion: https://postgr.es/m/CAPpHfdscOX5an71nHd8WSUH6GNOCf=V7wgDaTXdDd9=goN-gfA@mail.gmail.com
Discussion: https://postgr.es/m/CAPpHfds1waRZ=NOmueYq0sx1ZSCnt+5QJvizT8ndT2=etZEeAQ@mail.gmail.com
2020-04-06 15:33:28 -04:00
|
|
|
-- When there is a LIMIT clause, incremental sort is beneficial because
-- it only has to sort some of the groups, and not the entire table.
explain (costs off)
select * from (select * from tenk1 order by four) t order by four, ten
limit 1;

-- When work_mem is not enough to sort the entire table, incremental sort
-- may be faster if individual groups still fit into work_mem.
set work_mem to '2MB';
explain (costs off)
select * from (select * from tenk1 order by four) t order by four, ten;
reset work_mem;

-- Scratch table used by the mode-transition tests below.
create table t(a integer, b integer);
|
-- Return EXPLAIN ANALYZE output for the given query, one row per output
-- line, with memory amounts (e.g. "25kB") masked as "NNkB" so the test
-- output is stable across runs and platforms.
create or replace function explain_analyze_without_memory(query text)
returns table (out_line text) language plpgsql
as
$$
declare
  line text;
begin
  for line in
    execute 'explain (analyze, costs off, summary off, timing off) ' || query
  loop
    -- Mask every "<digits>kB" occurrence on the line.
    out_line := regexp_replace(line, '\d+kB', 'NNkB', 'g');
    return next;
  end loop;
end;
$$;
|
|
|
|
|
-- Return a jsonb array containing every Incremental Sort node found in the
-- JSON-format EXPLAIN ANALYZE output of the given query.  The plan tree is
-- walked iteratively, using "elements" as a work queue of jsonb values.
create or replace function explain_analyze_inc_sort_nodes(query text)
returns jsonb language plpgsql
as
$$
declare
  elements jsonb;
  element jsonb;
  matching_nodes jsonb := '[]'::jsonb;
begin
  execute 'explain (analyze, costs off, summary off, timing off, format ''json'') ' || query into strict elements;
  while jsonb_array_length(elements) > 0 loop
    -- Pop the first element off the work queue.
    element := elements->0;
    elements := elements - 0;
    case jsonb_typeof(element)
    when 'array' then
      -- Flatten nested arrays (e.g. "Plans" lists) back into the queue.
      if jsonb_array_length(element) > 0 then
        elements := elements || element;
      end if;
    when 'object' then
      if element ? 'Plan' then
        -- Top-level EXPLAIN wrapper: queue the root plan node.
        elements := elements || jsonb_build_array(element->'Plan');
        element := element - 'Plan';
      else
        -- Plan node: queue its children, then inspect its node type.
        if element ? 'Plans' then
          elements := elements || jsonb_build_array(element->'Plans');
          element := element - 'Plans';
        end if;
        if (element->>'Node Type')::text = 'Incremental Sort' then
          matching_nodes := matching_nodes || element;
        end if;
      end if;
    end case;
  end loop;
  return matching_nodes;
end;
$$;
|
|
|
|
|
-- Return the Incremental Sort nodes for the given query with all sort-space
-- usage numbers masked as "NN", so the output is stable across runs.
create or replace function explain_analyze_inc_sort_nodes_without_memory(query text)
returns jsonb language plpgsql
as
$$
declare
  nodes jsonb := '[]'::jsonb;
  node jsonb;
  group_key text;
  space_key text;
begin
  for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop
    for group_key in select unnest(array['Full-sort Groups', 'Pre-sorted Groups']::text[]) t loop
      for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop
        -- jsonb_set with create_missing = false: only mask keys that exist.
        node := jsonb_set(node, array[group_key, space_key, 'Average Sort Space Used'], '"NN"', false);
        node := jsonb_set(node, array[group_key, space_key, 'Peak Sort Space Used'], '"NN"', false);
      end loop;
    end loop;
    nodes := nodes || node;
  end loop;
  return nodes;
end;
$$;
|
|
|
|
|
-- Verify the invariant that, for each sort-space bucket of every
-- Incremental Sort node in the query's plan, the peak sort space used is
-- never less than the average sort space used.  Raises an exception on
-- violation; returns true otherwise.
create or replace function explain_analyze_inc_sort_nodes_verify_invariants(query text)
returns bool language plpgsql
as
$$
declare
  node jsonb;
  group_stats jsonb;
  group_key text;
  space_key text;
begin
  for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop
    for group_key in select unnest(array['Full-sort Groups', 'Pre-sorted Groups']::text[]) t loop
      group_stats := node->group_key;
      for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop
        -- Fix: the original compared 'Peak Sort Space Used' against itself,
        -- which can never be true; the invariant (and the exception message)
        -- require comparing peak against average.
        if (group_stats->space_key->'Peak Sort Space Used')::bigint < (group_stats->space_key->'Average Sort Space Used')::bigint then
          raise exception '% has invalid max space < average space', group_key;
        end if;
      end loop;
    end loop;
  end loop;
  return true;
end;
$$;
|
|
|
|
|
|
|
|
|
|
-- A single large group tested around each mode transition point.
insert into t(a, b) select i/100 + 1, i + 1 from generate_series(0, 999) n(i);
analyze t;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 31;
select * from (select * from t order by a) s order by a, b limit 31;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 32;
select * from (select * from t order by a) s order by a, b limit 32;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 33;
select * from (select * from t order by a) s order by a, b limit 33;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 65;
select * from (select * from t order by a) s order by a, b limit 65;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 66;
select * from (select * from t order by a) s order by a, b limit 66;
delete from t;
-- An initial large group followed by a small group.
insert into t(a, b) select i/50 + 1, i + 1 from generate_series(0, 999) n(i);
analyze t;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 55;
select * from (select * from t order by a) s order by a, b limit 55;
-- Test EXPLAIN ANALYZE with only a fullsort group.
select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 55');
select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 55'));
select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 55');
delete from t;
-- An initial small group followed by a large group.
insert into t(a, b) select (case when i < 5 then i else 9 end), i from generate_series(1, 1000) n(i);
analyze t;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 70;
select * from (select * from t order by a) s order by a, b limit 70;

-- Checks case where we hit a group boundary at the last tuple of a batch.
-- Because the full sort state is bounded, we scan 64 tuples (the mode
-- transition point) but only retain 5. Thus when we transition modes, all
-- tuples in the full sort state have different prefix keys.
explain (costs off) select * from (select * from t order by a) s order by a, b limit 5;
select * from (select * from t order by a) s order by a, b limit 5;

-- Test rescan.
begin;
-- We force the planner to choose a plan with incremental sort on the right side
-- of a nested loop join node. That way we trigger the rescan code path.
set local enable_hashjoin = off;
set local enable_mergejoin = off;
set local enable_material = off;
set local enable_sort = off;
explain (costs off) select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2);
select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2);
rollback;

-- Test EXPLAIN ANALYZE with both fullsort and presorted groups.
select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 70');
select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 70'));
select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 70');
delete from t;
-- Small groups of 10 tuples each tested around each mode transition point.
insert into t(a, b) select i / 10, i from generate_series(1, 1000) n(i);
analyze t;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 31;
select * from (select * from t order by a) s order by a, b limit 31;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 32;
select * from (select * from t order by a) s order by a, b limit 32;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 33;
select * from (select * from t order by a) s order by a, b limit 33;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 65;
select * from (select * from t order by a) s order by a, b limit 65;
explain (costs off) select * from (select * from t order by a) s order by a, b limit 66;
select * from (select * from t order by a) s order by a, b limit 66;
delete from t;
-- Small groups of only 1 tuple each tested around each mode transition point.
insert into t(a, b) select i, i from generate_series(1, 1000) n(i);
analyze t;
|
Implement Incremental Sort
Incremental Sort is an optimized variant of multikey sort for cases when
the input is already sorted by a prefix of the requested sort keys. For
example when the relation is already sorted by (key1, key2) and we need
to sort it by (key1, key2, key3) we can simply split the input rows into
groups having equal values in (key1, key2), and only sort/compare the
remaining column key3.
This has a number of benefits:
- Reduced memory consumption, because only a single group (determined by
values in the sorted prefix) needs to be kept in memory. This may also
eliminate the need to spill to disk.
- Lower startup cost, because Incremental Sort produce results after each
prefix group, which is beneficial for plans where startup cost matters
(like for example queries with LIMIT clause).
We consider both Sort and Incremental Sort, and decide based on costing.
The implemented algorithm operates in two different modes:
- Fetching a minimum number of tuples without check of equality on the
prefix keys, and sorting on all columns when safe.
- Fetching all tuples for a single prefix group and then sorting by
comparing only the remaining (non-prefix) keys.
We always start in the first mode, and employ a heuristic to switch into
the second mode if we believe it's beneficial - the goal is to minimize
the number of unnecessary comparions while keeping memory consumption
below work_mem.
This is a very old patch series. The idea was originally proposed by
Alexander Korotkov back in 2013, and then revived in 2017. In 2018 the
patch was taken over by James Coleman, who wrote and rewrote most of the
current code.
There were many reviewers/contributors since 2013 - I've done my best to
pick the most active ones, and listed them in this commit message.
Author: James Coleman, Alexander Korotkov
Reviewed-by: Tomas Vondra, Andreas Karlsson, Marti Raudsepp, Peter Geoghegan, Robert Haas, Thomas Munro, Antonin Houska, Andres Freund, Alexander Kuzmenkov
Discussion: https://postgr.es/m/CAPpHfdscOX5an71nHd8WSUH6GNOCf=V7wgDaTXdDd9=goN-gfA@mail.gmail.com
Discussion: https://postgr.es/m/CAPpHfds1waRZ=NOmueYq0sx1ZSCnt+5QJvizT8ndT2=etZEeAQ@mail.gmail.com
2020-04-06 15:33:28 -04:00
|
|
|
explain (costs off) select * from (select * from t order by a) s order by a, b limit 31;
|
|
|
|
|
select * from (select * from t order by a) s order by a, b limit 31;
|
|
|
|
|
explain (costs off) select * from (select * from t order by a) s order by a, b limit 32;
|
|
|
|
|
select * from (select * from t order by a) s order by a, b limit 32;
|
|
|
|
|
explain (costs off) select * from (select * from t order by a) s order by a, b limit 33;
|
|
|
|
|
select * from (select * from t order by a) s order by a, b limit 33;
|
|
|
|
|
explain (costs off) select * from (select * from t order by a) s order by a, b limit 65;
|
|
|
|
|
select * from (select * from t order by a) s order by a, b limit 65;
|
|
|
|
|
explain (costs off) select * from (select * from t order by a) s order by a, b limit 66;
|
|
|
|
|
select * from (select * from t order by a) s order by a, b limit 66;
|
|
|
|
|
delete from t;
|
|
|
|
|
|
|
|
|
|
drop table t;
|
|
|
|
|
|
|
|
|
|
-- Incremental sort vs. parallel queries
|
|
|
|
|
set min_parallel_table_scan_size = '1kB';
|
|
|
|
|
set min_parallel_index_scan_size = '1kB';
|
|
|
|
|
set parallel_setup_cost = 0;
|
|
|
|
|
set parallel_tuple_cost = 0;
|
2020-04-06 17:58:10 -04:00
|
|
|
set max_parallel_workers_per_gather = 2;
|
Implement Incremental Sort
Incremental Sort is an optimized variant of multikey sort for cases when
the input is already sorted by a prefix of the requested sort keys. For
example when the relation is already sorted by (key1, key2) and we need
to sort it by (key1, key2, key3) we can simply split the input rows into
groups having equal values in (key1, key2), and only sort/compare the
remaining column key3.
This has a number of benefits:
- Reduced memory consumption, because only a single group (determined by
values in the sorted prefix) needs to be kept in memory. This may also
eliminate the need to spill to disk.
- Lower startup cost, because Incremental Sort produce results after each
prefix group, which is beneficial for plans where startup cost matters
(like for example queries with LIMIT clause).
We consider both Sort and Incremental Sort, and decide based on costing.
The implemented algorithm operates in two different modes:
- Fetching a minimum number of tuples without check of equality on the
prefix keys, and sorting on all columns when safe.
- Fetching all tuples for a single prefix group and then sorting by
comparing only the remaining (non-prefix) keys.
We always start in the first mode, and employ a heuristic to switch into
the second mode if we believe it's beneficial - the goal is to minimize
the number of unnecessary comparions while keeping memory consumption
below work_mem.
This is a very old patch series. The idea was originally proposed by
Alexander Korotkov back in 2013, and then revived in 2017. In 2018 the
patch was taken over by James Coleman, who wrote and rewrote most of the
current code.
There were many reviewers/contributors since 2013 - I've done my best to
pick the most active ones, and listed them in this commit message.
Author: James Coleman, Alexander Korotkov
Reviewed-by: Tomas Vondra, Andreas Karlsson, Marti Raudsepp, Peter Geoghegan, Robert Haas, Thomas Munro, Antonin Houska, Andres Freund, Alexander Kuzmenkov
Discussion: https://postgr.es/m/CAPpHfdscOX5an71nHd8WSUH6GNOCf=V7wgDaTXdDd9=goN-gfA@mail.gmail.com
Discussion: https://postgr.es/m/CAPpHfds1waRZ=NOmueYq0sx1ZSCnt+5QJvizT8ndT2=etZEeAQ@mail.gmail.com
2020-04-06 15:33:28 -04:00
|
|
|
|
|
|
|
|
create table t (a int, b int, c int);
|
Revert "Optimize order of GROUP BY keys".
This reverts commit db0d67db2401eb6238ccc04c6407a4fd4f985832 and
several follow-on fixes. The idea of making a cost-based choice
of the order of the sorting columns is not fundamentally unsound,
but it requires cost information and data statistics that we don't
really have. For example, relying on procost to distinguish the
relative costs of different sort comparators is pretty pointless
so long as most such comparator functions are labeled with cost 1.0.
Moreover, estimating the number of comparisons done by Quicksort
requires more than just an estimate of the number of distinct values
in the input: you also need some idea of the sizes of the larger
groups, if you want an estimate that's good to better than a factor of
three or so. That's data that's often unknown or not very reliable.
Worse, to arrive at estimates of the number of calls made to the
lower-order-column comparison functions, the code needs to make
estimates of the numbers of distinct values of multiple columns,
which are necessarily even less trustworthy than per-column stats.
Even if all the inputs are perfectly reliable, the cost algorithm
as-implemented cannot offer useful information about how to order
sorting columns beyond the point at which the average group size
is estimated to drop to 1.
Close inspection of the code added by db0d67db2 shows that there
are also multiple small bugs. These could have been fixed, but
there's not much point if we don't trust the estimates to be
accurate in-principle.
Finally, the changes in cost_sort's behavior made for very large
changes (often a factor of 2 or so) in the cost estimates for all
sorting operations, not only those for multi-column GROUP BY.
That naturally changes plan choices in many situations, and there's
precious little evidence to show that the changes are for the better.
Given the above doubts about whether the new estimates are really
trustworthy, it's hard to summon much confidence that these changes
are better on the average.
Since we're hard up against the release deadline for v15, let's
revert these changes for now. We can always try again later.
Note: in v15, I left T_PathKeyInfo in place in nodes.h even though
it's unreferenced. Removing it would be an ABI break, and it seems
a bit late in the release cycle for that.
Discussion: https://postgr.es/m/TYAPR01MB586665EB5FB2C3807E893941F5579@TYAPR01MB5866.jpnprd01.prod.outlook.com
2022-10-03 10:56:16 -04:00
|
|
|
insert into t select mod(i,10),mod(i,10),i from generate_series(1,10000) s(i);
|
Implement Incremental Sort
Incremental Sort is an optimized variant of multikey sort for cases when
the input is already sorted by a prefix of the requested sort keys. For
example when the relation is already sorted by (key1, key2) and we need
to sort it by (key1, key2, key3) we can simply split the input rows into
groups having equal values in (key1, key2), and only sort/compare the
remaining column key3.
This has a number of benefits:
- Reduced memory consumption, because only a single group (determined by
values in the sorted prefix) needs to be kept in memory. This may also
eliminate the need to spill to disk.
- Lower startup cost, because Incremental Sort produce results after each
prefix group, which is beneficial for plans where startup cost matters
(like for example queries with LIMIT clause).
We consider both Sort and Incremental Sort, and decide based on costing.
The implemented algorithm operates in two different modes:
- Fetching a minimum number of tuples without check of equality on the
prefix keys, and sorting on all columns when safe.
- Fetching all tuples for a single prefix group and then sorting by
comparing only the remaining (non-prefix) keys.
We always start in the first mode, and employ a heuristic to switch into
the second mode if we believe it's beneficial - the goal is to minimize
the number of unnecessary comparions while keeping memory consumption
below work_mem.
This is a very old patch series. The idea was originally proposed by
Alexander Korotkov back in 2013, and then revived in 2017. In 2018 the
patch was taken over by James Coleman, who wrote and rewrote most of the
current code.
There were many reviewers/contributors since 2013 - I've done my best to
pick the most active ones, and listed them in this commit message.
Author: James Coleman, Alexander Korotkov
Reviewed-by: Tomas Vondra, Andreas Karlsson, Marti Raudsepp, Peter Geoghegan, Robert Haas, Thomas Munro, Antonin Houska, Andres Freund, Alexander Kuzmenkov
Discussion: https://postgr.es/m/CAPpHfdscOX5an71nHd8WSUH6GNOCf=V7wgDaTXdDd9=goN-gfA@mail.gmail.com
Discussion: https://postgr.es/m/CAPpHfds1waRZ=NOmueYq0sx1ZSCnt+5QJvizT8ndT2=etZEeAQ@mail.gmail.com
2020-04-06 15:33:28 -04:00
|
|
|
create index on t (a);
|
|
|
|
|
analyze t;
|
|
|
|
|
|
2020-07-05 05:41:52 -04:00
|
|
|
set enable_incremental_sort = off;
|
Implement Incremental Sort
Incremental Sort is an optimized variant of multikey sort for cases when
the input is already sorted by a prefix of the requested sort keys. For
example when the relation is already sorted by (key1, key2) and we need
to sort it by (key1, key2, key3) we can simply split the input rows into
groups having equal values in (key1, key2), and only sort/compare the
remaining column key3.
This has a number of benefits:
- Reduced memory consumption, because only a single group (determined by
values in the sorted prefix) needs to be kept in memory. This may also
eliminate the need to spill to disk.
- Lower startup cost, because Incremental Sort produce results after each
prefix group, which is beneficial for plans where startup cost matters
(like for example queries with LIMIT clause).
We consider both Sort and Incremental Sort, and decide based on costing.
The implemented algorithm operates in two different modes:
- Fetching a minimum number of tuples without check of equality on the
prefix keys, and sorting on all columns when safe.
- Fetching all tuples for a single prefix group and then sorting by
comparing only the remaining (non-prefix) keys.
We always start in the first mode, and employ a heuristic to switch into
the second mode if we believe it's beneficial - the goal is to minimize
the number of unnecessary comparions while keeping memory consumption
below work_mem.
This is a very old patch series. The idea was originally proposed by
Alexander Korotkov back in 2013, and then revived in 2017. In 2018 the
patch was taken over by James Coleman, who wrote and rewrote most of the
current code.
There were many reviewers/contributors since 2013 - I've done my best to
pick the most active ones, and listed them in this commit message.
Author: James Coleman, Alexander Korotkov
Reviewed-by: Tomas Vondra, Andreas Karlsson, Marti Raudsepp, Peter Geoghegan, Robert Haas, Thomas Munro, Antonin Houska, Andres Freund, Alexander Kuzmenkov
Discussion: https://postgr.es/m/CAPpHfdscOX5an71nHd8WSUH6GNOCf=V7wgDaTXdDd9=goN-gfA@mail.gmail.com
Discussion: https://postgr.es/m/CAPpHfds1waRZ=NOmueYq0sx1ZSCnt+5QJvizT8ndT2=etZEeAQ@mail.gmail.com
2020-04-06 15:33:28 -04:00
|
|
|
explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1;
|
|
|
|
|
|
2020-07-05 05:41:52 -04:00
|
|
|
set enable_incremental_sort = on;
|
Implement Incremental Sort
Incremental Sort is an optimized variant of multikey sort for cases when
the input is already sorted by a prefix of the requested sort keys. For
example when the relation is already sorted by (key1, key2) and we need
to sort it by (key1, key2, key3) we can simply split the input rows into
groups having equal values in (key1, key2), and only sort/compare the
remaining column key3.
This has a number of benefits:
- Reduced memory consumption, because only a single group (determined by
values in the sorted prefix) needs to be kept in memory. This may also
eliminate the need to spill to disk.
- Lower startup cost, because Incremental Sort produce results after each
prefix group, which is beneficial for plans where startup cost matters
(like for example queries with LIMIT clause).
We consider both Sort and Incremental Sort, and decide based on costing.
The implemented algorithm operates in two different modes:
- Fetching a minimum number of tuples without check of equality on the
prefix keys, and sorting on all columns when safe.
- Fetching all tuples for a single prefix group and then sorting by
comparing only the remaining (non-prefix) keys.
We always start in the first mode, and employ a heuristic to switch into
the second mode if we believe it's beneficial - the goal is to minimize
the number of unnecessary comparions while keeping memory consumption
below work_mem.
This is a very old patch series. The idea was originally proposed by
Alexander Korotkov back in 2013, and then revived in 2017. In 2018 the
patch was taken over by James Coleman, who wrote and rewrote most of the
current code.
There were many reviewers/contributors since 2013 - I've done my best to
pick the most active ones, and listed them in this commit message.
Author: James Coleman, Alexander Korotkov
Reviewed-by: Tomas Vondra, Andreas Karlsson, Marti Raudsepp, Peter Geoghegan, Robert Haas, Thomas Munro, Antonin Houska, Andres Freund, Alexander Kuzmenkov
Discussion: https://postgr.es/m/CAPpHfdscOX5an71nHd8WSUH6GNOCf=V7wgDaTXdDd9=goN-gfA@mail.gmail.com
Discussion: https://postgr.es/m/CAPpHfds1waRZ=NOmueYq0sx1ZSCnt+5QJvizT8ndT2=etZEeAQ@mail.gmail.com
2020-04-06 15:33:28 -04:00
|
|
|
explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1;
|
|
|
|
|
|
2020-04-22 18:15:24 -04:00
|
|
|
-- Incremental sort vs. set operations with varno 0
|
|
|
|
|
set enable_hashagg to off;
|
|
|
|
|
explain (costs off) select * from t union select * from t order by 1,3;
|
|
|
|
|
|
2020-12-21 12:09:57 -05:00
|
|
|
-- Full sort, not just incremental sort can be pushed below a gather merge path
|
|
|
|
|
-- by generate_useful_gather_paths.
|
|
|
|
|
explain (costs off) select distinct a,b from t;
|
|
|
|
|
|
Implement Incremental Sort
Incremental Sort is an optimized variant of multikey sort for cases when
the input is already sorted by a prefix of the requested sort keys. For
example when the relation is already sorted by (key1, key2) and we need
to sort it by (key1, key2, key3) we can simply split the input rows into
groups having equal values in (key1, key2), and only sort/compare the
remaining column key3.
This has a number of benefits:
- Reduced memory consumption, because only a single group (determined by
values in the sorted prefix) needs to be kept in memory. This may also
eliminate the need to spill to disk.
- Lower startup cost, because Incremental Sort produce results after each
prefix group, which is beneficial for plans where startup cost matters
(like for example queries with LIMIT clause).
We consider both Sort and Incremental Sort, and decide based on costing.
The implemented algorithm operates in two different modes:
- Fetching a minimum number of tuples without check of equality on the
prefix keys, and sorting on all columns when safe.
- Fetching all tuples for a single prefix group and then sorting by
comparing only the remaining (non-prefix) keys.
We always start in the first mode, and employ a heuristic to switch into
the second mode if we believe it's beneficial - the goal is to minimize
the number of unnecessary comparions while keeping memory consumption
below work_mem.
This is a very old patch series. The idea was originally proposed by
Alexander Korotkov back in 2013, and then revived in 2017. In 2018 the
patch was taken over by James Coleman, who wrote and rewrote most of the
current code.
There were many reviewers/contributors since 2013 - I've done my best to
pick the most active ones, and listed them in this commit message.
Author: James Coleman, Alexander Korotkov
Reviewed-by: Tomas Vondra, Andreas Karlsson, Marti Raudsepp, Peter Geoghegan, Robert Haas, Thomas Munro, Antonin Houska, Andres Freund, Alexander Kuzmenkov
Discussion: https://postgr.es/m/CAPpHfdscOX5an71nHd8WSUH6GNOCf=V7wgDaTXdDd9=goN-gfA@mail.gmail.com
Discussion: https://postgr.es/m/CAPpHfds1waRZ=NOmueYq0sx1ZSCnt+5QJvizT8ndT2=etZEeAQ@mail.gmail.com
2020-04-06 15:33:28 -04:00
|
|
|
drop table t;
|
2020-11-03 14:07:23 -05:00
|
|
|
|
|
|
|
|
-- Sort pushdown can't go below where expressions are part of the rel target.
|
|
|
|
|
-- In particular this is interesting for volatile expressions which have to
|
|
|
|
|
-- go above joins since otherwise we'll incorrectly use expression evaluations
|
|
|
|
|
-- across multiple rows.
|
|
|
|
|
set enable_hashagg=off;
|
|
|
|
|
set enable_seqscan=off;
|
|
|
|
|
set enable_incremental_sort = off;
|
|
|
|
|
set parallel_tuple_cost=0;
|
|
|
|
|
set parallel_setup_cost=0;
|
|
|
|
|
set min_parallel_table_scan_size = 0;
|
|
|
|
|
set min_parallel_index_scan_size = 0;
|
|
|
|
|
|
|
|
|
|
-- Parallel sort below join.
|
|
|
|
|
explain (costs off) select distinct sub.unique1, stringu1
|
|
|
|
|
from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub;
|
|
|
|
|
explain (costs off) select sub.unique1, stringu1
|
|
|
|
|
from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub
|
|
|
|
|
order by 1, 2;
|
|
|
|
|
-- Parallel sort but with expression that can be safely generated at the base rel.
|
|
|
|
|
explain (costs off) select distinct sub.unique1, md5(stringu1)
|
|
|
|
|
from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub;
|
|
|
|
|
explain (costs off) select sub.unique1, md5(stringu1)
|
|
|
|
|
from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub
|
|
|
|
|
order by 1, 2;
|
2021-04-20 11:32:02 -04:00
|
|
|
-- Parallel sort with an aggregate that can be safely generated in parallel,
|
|
|
|
|
-- but we can't sort by partial aggregate values.
|
|
|
|
|
explain (costs off) select count(*)
|
|
|
|
|
from tenk1 t1
|
|
|
|
|
join tenk1 t2 on t1.unique1 = t2.unique2
|
|
|
|
|
join tenk1 t3 on t2.unique1 = t3.unique1
|
|
|
|
|
order by count(*);
|
2020-12-21 12:29:46 -05:00
|
|
|
-- Parallel sort but with expression (correlated subquery) that
|
|
|
|
|
-- is prohibited in parallel plans.
|
|
|
|
|
explain (costs off) select distinct
|
|
|
|
|
unique1,
|
|
|
|
|
(select t.unique1 from tenk1 where tenk1.unique1 = t.unique1)
|
|
|
|
|
from tenk1 t, generate_series(1, 1000);
|
|
|
|
|
explain (costs off) select
|
|
|
|
|
unique1,
|
|
|
|
|
(select t.unique1 from tenk1 where tenk1.unique1 = t.unique1)
|
|
|
|
|
from tenk1 t, generate_series(1, 1000)
|
|
|
|
|
order by 1, 2;
|
2020-11-03 14:07:23 -05:00
|
|
|
-- Parallel sort but with expression not available until the upper rel.
|
|
|
|
|
explain (costs off) select distinct sub.unique1, stringu1 || random()::text
|
|
|
|
|
from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub;
|
|
|
|
|
explain (costs off) select sub.unique1, stringu1 || random()::text
|
|
|
|
|
from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub
|
|
|
|
|
order by 1, 2;
|