#!/usr/bin/perl
# src/interfaces/ecpg/preproc/parse.pl
# parser generator for ecpg version 2
# call with the backend grammar (--parser) as input
#
# Copyright (c) 2007-2022, PostgreSQL Global Development Group
#
# Written by Mike Aubury <mike.aubury@aubit.com>
#            Michael Meskes <meskes@postgresql.org>
#            Andy Colson <andy@squeakycode.net>
#
# Placed under the same license as PostgreSQL.
#

use strict;
use warnings;
use Getopt::Long;

my $srcdir = '.';
my $outfile = '';
my $parser = '';

GetOptions(
    'srcdir=s' => \$srcdir,
    'output=s' => \$outfile,
    'parser=s' => \$parser,) or die "wrong arguments";
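
# Illustrative invocation (added for clarity; file names are examples only):
#   perl parse.pl --srcdir . --parser .../backend/parser/gram.y --output preproc.y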

# open parser / output file early, to raise errors early
open(my $parserfh, '<', $parser) or die "could not open parser file $parser";
open(my $outfh, '>', $outfile) or die "could not open output file $outfile";

my $copymode = 0;
my $brace_indent = 0;
my $yaccmode = 0;
my $in_rule = 0;
my $header_included = 0;
my $feature_not_supported = 0;
my $tokenmode = 0;

my (%buff, $infield, $comment, %tokens, %addons);
my ($stmt_mode, @fields);
my $line = '';
my $non_term_id;

# some tokens have to be replaced by other symbols
# either in the rule
my %replace_token = (
    'BCONST' => 'ecpg_bconst',
    'FCONST' => 'ecpg_fconst',
    'Sconst' => 'ecpg_sconst',
    'XCONST' => 'ecpg_xconst',
    'IDENT' => 'ecpg_ident',
    'PARAM' => 'ecpg_param',);

# or in the block
my %replace_string = (
    'NOT_LA' => 'not',
    'NULLS_LA' => 'nulls',
    'WITH_LA' => 'with',
    'WITH_LA_UNIQUE' => 'with',
    'WITHOUT_LA' => 'without',
    'TYPECAST' => '::',
    'DOT_DOT' => '..',
    'COLON_EQUALS' => ':=',
    'EQUALS_GREATER' => '=>',
    'LESS_EQUALS' => '<=',
    'GREATER_EQUALS' => '>=',
    'NOT_EQUALS' => '<>',);
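
# Note (added for clarity): %replace_token renames a token wherever it appears
# in a rule (e.g. IDENT is emitted as ecpg_ident), whereas %replace_string only
# changes the literal text a token contributes when the rule's output string is
# rebuilt (e.g. TYPECAST stays a token but adds '::' to the mm_strdup()'d text).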

# specific replace_types for specific non-terminals - never include the ':'
# ECPG-only replace_types are defined in ecpg-replace_types
my %replace_types = (
    'PrepareStmt' => '<prep>',
    'ExecuteStmt' => '<exec>',
    'opt_array_bounds' => '<index>',

    # "ignore" means: do not create type and rules for this non-term-id
    'parse_toplevel' => 'ignore',
    'stmtmulti' => 'ignore',
    'CreateAsStmt' => 'ignore',
    'DeallocateStmt' => 'ignore',
    'ColId' => 'ignore',
    'type_function_name' => 'ignore',
    'ColLabel' => 'ignore',
    'Sconst' => 'ignore',
    'opt_distinct_clause' => 'ignore',
    'PLpgSQL_Expr' => 'ignore',
    'PLAssignStmt' => 'ignore',
    'plassign_target' => 'ignore',
    'plassign_equals' => 'ignore',);
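
# Note (added for clarity): any non-terminal not listed above is given the
# default type '<str>' the first time main() sees it.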

# these replace_line commands excise certain keywords from the core keyword
# lists. Be sure to account for these in ColLabel and related productions.
my %replace_line = (
    'unreserved_keywordCONNECTION' => 'ignore',
    'unreserved_keywordCURRENT_P' => 'ignore',
    'unreserved_keywordDAY_P' => 'ignore',
    'unreserved_keywordHOUR_P' => 'ignore',
    'unreserved_keywordINPUT_P' => 'ignore',
    'unreserved_keywordMINUTE_P' => 'ignore',
    'unreserved_keywordMONTH_P' => 'ignore',
    'unreserved_keywordSECOND_P' => 'ignore',
    'unreserved_keywordYEAR_P' => 'ignore',
    'col_name_keywordCHAR_P' => 'ignore',
    'col_name_keywordINT_P' => 'ignore',
    'col_name_keywordVALUES' => 'ignore',
    'reserved_keywordTO' => 'ignore',
    'reserved_keywordUNION' => 'ignore',

    # some other production rules have to be ignored or replaced
    'fetch_argsFORWARDopt_from_incursor_name' => 'ignore',
    'fetch_argsBACKWARDopt_from_incursor_name' => 'ignore',
    "opt_array_boundsopt_array_bounds'['Iconst']'" => 'ignore',
    'VariableShowStmtSHOWvar_name' => 'SHOW var_name ecpg_into',
    'VariableShowStmtSHOWTIMEZONE' => 'SHOW TIME ZONE ecpg_into',
    'VariableShowStmtSHOWTRANSACTIONISOLATIONLEVEL' =>
      'SHOW TRANSACTION ISOLATION LEVEL ecpg_into',
    'VariableShowStmtSHOWSESSIONAUTHORIZATION' =>
      'SHOW SESSION AUTHORIZATION ecpg_into',
    'returning_clauseRETURNINGtarget_list' =>
      'RETURNING target_list opt_ecpg_into',
    'ExecuteStmtEXECUTEnameexecute_param_clause' =>
      'EXECUTE prepared_name execute_param_clause execute_rest',
    'ExecuteStmtCREATEOptTempTABLEcreate_as_targetASEXECUTEnameexecute_param_clauseopt_with_data'
      => 'CREATE OptTemp TABLE create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest',
    'ExecuteStmtCREATEOptTempTABLEIF_PNOTEXISTScreate_as_targetASEXECUTEnameexecute_param_clauseopt_with_data'
      => 'CREATE OptTemp TABLE IF_P NOT EXISTS create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest',
    'PrepareStmtPREPAREnameprep_type_clauseASPreparableStmt' =>
      'PREPARE prepared_name prep_type_clause AS PreparableStmt',
    'var_nameColId' => 'ECPGColId');
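
# Illustrative note (added for clarity): keys in %replace_line are built the
# same way dump_line() builds its lookup key -- the non-terminal name followed
# by the rule's tokens with all spaces and '|' removed, so a backend rule such
# as
#     VariableShowStmt: SHOW var_name
# is looked up as 'VariableShowStmtSHOWvar_name'.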

preload_addons();

main();
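
# Write out the assembled grammar in bison order: the declaration buffers
# first, then '%%', the rules, and finally the trailer.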
dump_buffer('header');
dump_buffer('tokens');
dump_buffer('types');
dump_buffer('ecpgtype');
dump_buffer('orig_tokens');
print $outfh '%%', "\n";
print $outfh 'prog: statements;', "\n";
dump_buffer('rules');
include_file('trailer', 'ecpg.trailer');
dump_buffer('trailer');

close($parserfh);

sub main
{
  line: while (<$parserfh>)
    {
        if (/ERRCODE_FEATURE_NOT_SUPPORTED/)
        {
            $feature_not_supported = 1;
            next line;
        }

        chomp;

        # comment out the line below to make the result file match (blank line wise)
        # the prior version.
        #next if ($_ eq '');

        # Dump the action for a rule -
        # stmt_mode indicates if we are processing the 'stmt:'
        # rule (mode==0 means normal, mode==1 means stmt:)
        # flds are the fields to use. These may start with a '$' - in
        # which case they are the result of a previous non-terminal
        #
        # if they don't start with a '$' then they are token names
        #
        # len is the number of fields in flds...
        # leadin is the padding to apply at the beginning (just used for formatting)
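
        # Illustrative example (added for clarity): for a backend rule such as
        #     ClosePortalStmt: CLOSE cursor_name
        # the collected fields would be ('close', '$2') -- known tokens keep
        # their (lowercased) text, while non-terminals become positional '$n'
        # references.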

        if (/^%%/)
        {
            $tokenmode = 2;
            $copymode = 1;
            $yaccmode++;
            $infield = 0;
        }

        my $prec = 0;

        # Make sure any braces are split
        s/{/ { /g;
        s/}/ } /g;

        # Any comments are split
        s|\/\*| /* |g;
        s|\*\/| */ |g;

        # Now split the line into individual fields
        my @arr = split(' ');

        if (!@arr)
        {
            # empty line: in tokenmode 1, emit an empty line, else ignore
            if ($tokenmode == 1)
            {
                add_to_buffer('orig_tokens', '');
            }
            next line;
        }

        if ($arr[0] eq '%token' && $tokenmode == 0)
        {
            $tokenmode = 1;
            include_file('tokens', 'ecpg.tokens');
        }
        elsif ($arr[0] eq '%type' && $header_included == 0)
        {
            include_file('header', 'ecpg.header');
            include_file('ecpgtype', 'ecpg.type');
            $header_included = 1;
        }

        if ($tokenmode == 1)
        {
            my $str = '';
            my $prior = '';
            for my $a (@arr)
            {
                if ($a eq '/*')
                {
                    $comment++;
                    next;
                }
                if ($a eq '*/')
                {
                    $comment--;
                    next;
                }
                if ($comment)
                {
                    next;
                }
                if (substr($a, 0, 1) eq '<')
                {
                    next;

                    # it's a type
                }
                $tokens{$a} = 1;

                $str = $str . ' ' . $a;
                if ($a eq 'IDENT' && $prior eq '%nonassoc')
                {
                    # add more tokens to the list
                    $str = $str . "\n%nonassoc CSTRING";
                }
                $prior = $a;
            }
            add_to_buffer('orig_tokens', $str);
            next line;
        }

        # Don't worry about anything if we're not in the right section of gram.y
        if ($yaccmode != 1)
        {
            next line;
        }

        # Go through each field in turn
        for (
            my $fieldIndexer = 0;
            $fieldIndexer < scalar(@arr);
            $fieldIndexer++)
        {
            if ($arr[$fieldIndexer] eq '*/' && $comment)
            {
                $comment = 0;
                next;
            }
            elsif ($comment)
            {
                next;
            }
            elsif ($arr[$fieldIndexer] eq '/*')
            {
                # start of a multiline comment
                $comment = 1;
                next;
            }
            elsif ($arr[$fieldIndexer] eq '//')
            {
                next line;
            }
            elsif ($arr[$fieldIndexer] eq '}')
            {
                $brace_indent--;
                next;
            }
            elsif ($arr[$fieldIndexer] eq '{')
            {
                $brace_indent++;
                next;
            }
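
            # Anything inside the backend's C action braces is skipped;
            # ecpg supplies its own actions (from ecpg.addons or dump_fields()).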
            if ($brace_indent > 0)
            {
                next;
            }
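
            # A ';' ends the current rule: flush any pending alternative and
            # reset the per-rule state.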
            if ($arr[$fieldIndexer] eq ';')
            {
                if ($copymode)
                {
                    if ($infield)
                    {
                        dump_line($stmt_mode, \@fields);
                    }
                    add_to_buffer('rules', ";\n\n");
                }
                else
                {
                    $copymode = 1;
                }
                @fields = ();
                $infield = 0;
                $line = '';
                $in_rule = 0;
                next;
            }

            if ($arr[$fieldIndexer] eq '|')
            {
                if ($copymode)
                {
                    if ($infield)
                    {
                        $infield = $infield + dump_line($stmt_mode, \@fields);
                    }
                    if ($infield > 1)
                    {
                        $line = '| ';
                    }
                }
                @fields = ();
                next;
            }

            if (exists $replace_token{ $arr[$fieldIndexer] })
            {
                $arr[$fieldIndexer] = $replace_token{ $arr[$fieldIndexer] };
            }

            # Are we looking at a declaration of a non-terminal ?
            if (($arr[$fieldIndexer] =~ /[A-Za-z0-9]+:/)
                || ($fieldIndexer + 1 < scalar(@arr)
                    && $arr[ $fieldIndexer + 1 ] eq ':'))
            {
                $non_term_id = $arr[$fieldIndexer];
                $non_term_id =~ tr/://d;

                if (not defined $replace_types{$non_term_id})
                {
                    $replace_types{$non_term_id} = '<str>';
                    $copymode = 1;
                }
                elsif ($replace_types{$non_term_id} eq 'ignore')
                {
                    $copymode = 0;
                    $line = '';
                    next line;
                }
                $line = $line . ' ' . $arr[$fieldIndexer];

                # Do we have the : attached already ?
                # If yes, we'll have already printed the ':'
                if (!($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:'))
                {
                    # Consume the ':' which is next...
                    $line = $line . ':';
                    $fieldIndexer++;
                }

                # Special mode?
                if ($non_term_id eq 'stmt')
                {
                    $stmt_mode = 1;
                }
                else
                {
                    $stmt_mode = 0;
                }
                my $tstr =
                    '%type '
                  . $replace_types{$non_term_id} . ' '
                  . $non_term_id;
                add_to_buffer('types', $tstr);

                if ($copymode)
                {
                    add_to_buffer('rules', $line);
                }
                $line = '';
                @fields = ();
                $infield = 1;
                die "unterminated rule at grammar line $.\n"
                  if $in_rule;
                $in_rule = 1;
                next;
            }
            elsif ($copymode)
            {
                $line = $line . ' ' . $arr[$fieldIndexer];
            }
            if ($arr[$fieldIndexer] eq '%prec')
            {
                $prec = 1;
                next;
            }

            if (   $copymode
                && !$prec
                && !$comment
                && $fieldIndexer < scalar(@arr)
                && length($arr[$fieldIndexer])
                && $infield)
            {
                if ($arr[$fieldIndexer] ne 'Op'
                    && ((defined $tokens{ $arr[$fieldIndexer] }
                            && $tokens{ $arr[$fieldIndexer] } > 0)
                        || $arr[$fieldIndexer] =~ /'.+'/)
                    || $stmt_mode == 1)
                {
                    my $S;
                    if (exists $replace_string{ $arr[$fieldIndexer] })
                    {
                        $S = $replace_string{ $arr[$fieldIndexer] };
                    }
                    else
                    {
                        $S = $arr[$fieldIndexer];
                    }
                    $S =~ s/_P//g;
                    $S =~ tr/'//d;
                    if ($stmt_mode == 1)
                    {
                        push(@fields, $S);
                    }
                    else
                    {
                        push(@fields, lc($S));
                    }
                }
                else
                {
                    push(@fields, '$' . (scalar(@fields) + 1));
                }
            }
        }
    }
    die "unterminated rule at end of grammar\n"
      if $in_rule;
    return;
}

# append a file onto a buffer.
# Arguments: buffer_name, filename (without path)
sub include_file
{
    my ($buffer, $filename) = @_;
    my $full = "$srcdir/$filename";
    open(my $fh, '<', $full) or die;
    while (<$fh>)
    {
        chomp;
        add_to_buffer($buffer, $_);
    }
    close($fh);
    return;
}

sub include_addon
{
    my ($buffer, $block, $fields, $stmt_mode) = @_;
    my $rec = $addons{$block};
    return 0 unless $rec;

    my $rectype = (defined $rec->{type}) ? $rec->{type} : '';
    if ($rectype eq 'rule')
    {
        dump_fields($stmt_mode, $fields, ' { ');
    }
    elsif ($rectype eq 'addon')
    {
        add_to_buffer('rules', ' { ');
    }

    #add_to_buffer( $stream, $_ );
    # We have an array to add to the buffer; we'll add it ourselves instead of
    # calling add_to_buffer, which does not know about arrays.
    push(@{ $buff{$buffer} }, @{ $rec->{lines} });

    if ($rectype eq 'addon')
    {
        dump_fields($stmt_mode, $fields, '');
    }

    # if we added something (i.e. there are lines in our array), return 1
    return 1 if (scalar(@{ $rec->{lines} }) > 0);
    return 0;
}

# include_addon does this same thing, but does not call this
# sub... so if you change this, you need to fix include_addon too
# Pass: buffer_name, string_to_append
sub add_to_buffer
{
    push(@{ $buff{ $_[0] } }, "$_[1]\n");
    return;
}

sub dump_buffer
{
    my ($buffer) = @_;
    print $outfh '/* ', $buffer, ' */', "\n";
    my $ref = $buff{$buffer};
    print $outfh @$ref;
    return;
}

sub dump_fields
{
    my ($mode, $flds, $ln) = @_;
    my $len = scalar(@$flds);

    if ($mode == 0)
    {
        # Normal
        add_to_buffer('rules', $ln);
        if ($feature_not_supported == 1)
        {
            # we found an unsupported feature, but we have to
            # filter out ExecuteStmt: CREATE OptTemp TABLE ...
            # because the warning there is only valid in some situations
            if ($flds->[0] ne 'create' || $flds->[2] ne 'table')
            {
                add_to_buffer('rules',
                    'mmerror(PARSE_ERROR, ET_WARNING, "unsupported feature will be passed to server");'
                );
            }
            $feature_not_supported = 0;
        }

        if ($len == 0)
        {
            # We have no fields ?
            add_to_buffer('rules', ' $$=EMPTY; }');
        }
        else
        {
            # Go through each field and try to 'aggregate' the tokens
            # into a single 'mm_strdup' where possible
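
            # Illustrative example (added for clarity): fields such as
            #     ('a', 'b', '$3', 'c')
            # are collapsed to
            #     ('mm_strdup("a b")', '$3', 'mm_strdup("c")')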
            my @flds_new;
            my $str;
            for (my $z = 0; $z < $len; $z++)
            {
                if (substr($flds->[$z], 0, 1) eq '$')
                {
                    push(@flds_new, $flds->[$z]);
                    next;
                }

                $str = $flds->[$z];

                while (1)
                {
                    if ($z >= $len - 1
                        || substr($flds->[ $z + 1 ], 0, 1) eq '$')
                    {
                        # We're at the end...
                        push(@flds_new, "mm_strdup(\"$str\")");
                        last;
                    }
                    $z++;
                    $str = $str . ' ' . $flds->[$z];
                }
            }

            # So - how many fields did we end up with ?
            $len = scalar(@flds_new);
            if ($len == 1)
            {
                # Straight assignment
                $str = ' $$ = ' . $flds_new[0] . ';';
                add_to_buffer('rules', $str);
            }
            else
            {
                # Need to concatenate the results to form
                # our final string
                $str =
                  ' $$ = cat_str(' . $len . ',' . join(',', @flds_new) . ');';
                add_to_buffer('rules', $str);
            }
            add_to_buffer('rules', '}');
        }
    }
    else
    {
        # we're in the stmt: rule
        if ($len)
        {
            # or just the statement ...
            add_to_buffer('rules',
                ' { output_statement($1, 0, ECPGst_normal); }');
        }
        else
        {
            add_to_buffer('rules', ' { $$ = NULL; }');
        }
    }
    return;
}

sub dump_line
{
    my ($stmt_mode, $fields) = @_;
    my $block = $non_term_id . $line;
    $block =~ tr/ |//d;
    my $rep = $replace_line{$block};
    if ($rep)
    {
        if ($rep eq 'ignore')
        {
            return 0;
        }

        if (index($line, '|') != -1)
        {
            $line = '| ' . $rep;
        }
        else
        {
            $line = $rep;
        }
        $block = $non_term_id . $line;
        $block =~ tr/ |//d;
    }
    add_to_buffer('rules', $line);
    my $i = include_addon('rules', $block, $fields, $stmt_mode);
    if ($i == 0)
    {
        dump_fields($stmt_mode, $fields, ' { ');
    }
    return 1;
}

=top
    load addons into cache
    %addons = {
        stmtClosePortalStmt => { 'type' => 'block', 'lines' => [ "{", "if (INFORMIX_MODE)" ..., "}" ] },
        stmtViewStmt => { 'type' => 'rule', 'lines' => [ "| ECPGAllocateDescr", ... ] }
    }

=cut
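
# Illustrative sketch of the ecpg.addons format parsed below (simplified, not
# copied from the real file):
#
#   ECPG: stmtClosePortalStmt block
#       { if (INFORMIX_MODE) ... }
#   ECPG: stmtViewStmt rule
#       | ECPGAllocateDescr ...
#
# Each "ECPG:" line names a block and its type; the code lines that follow are
# attached to every immediately preceding "ECPG:" record.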

sub preload_addons
{
    my $filename = $srcdir . "/ecpg.addons";
    open(my $fh, '<', $filename) or die;

    # There may be multiple lines starting with ECPG:, followed by multiple
    # lines of code; the code needs to be added to all prior ECPG records.
    my (@needsRules, @code, $record);

    # there may be comments before the first ECPG line, skip them
    my $skip = 1;
    while (<$fh>)
    {
        if (/^ECPG:\s(\S+)\s?(\w+)?/)
        {
            $skip = 0;
            if (@code)
            {
                for my $x (@needsRules)
                {
                    push(@{ $x->{lines} }, @code);
                }
                @code = ();
                @needsRules = ();
            }
            $record = {};
            $record->{type} = $2;
            $record->{lines} = [];
            if (exists $addons{$1}) { die "Ga! there are dups!\n"; }
            $addons{$1} = $record;
            push(@needsRules, $record);
        }
        else
        {
            next if $skip;
            push(@code, $_);
        }
    }
    close($fh);
    if (@code)
    {
        for my $x (@needsRules)
        {
            push(@{ $x->{lines} }, @code);
        }
    }
    return;
}