# Exercising Bison on conflicts. -*- Autotest -*-
-# Copyright (C) 2002 Free Software Foundation, Inc.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
+# Copyright (C) 2002-2005, 2007, 2009-2012 Free Software Foundation,
+# Inc.
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-
+#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
-# 02111-1307, USA.
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
AT_BANNER([[Conflicts.]])
e: 'e' | /* Nothing. */;
]])
-AT_CHECK([bison input.y -o input.c], 0, [],
-[[input.y:4.8: warning: rule never reduced because of conflicts: e: /* empty */
+AT_BISON_CHECK([-o input.c input.y], 0, [],
+[[input.y:4.9: warning: rule useless in parser due to conflicts: e: /* empty */
]])
AT_CLEANUP
AT_SETUP([%nonassoc and eof])
-AT_DATA([input.y],
+AT_BISON_OPTION_PUSHDEFS
+AT_DATA_GRAMMAR([input.y],
[[
%{
-#include <config.h>
-/* We don't need a perfect malloc for these tests. */
-#undef malloc
#include <stdio.h>
-
-#if STDC_HEADERS
-# include <stdlib.h>
-#endif
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
#define YYERROR_VERBOSE 1
-static void
-yyerror (const char *msg)
-{
- fprintf (stderr, "%s\n", msg);
- exit (1);
-}
-
+]AT_YYERROR_DEFINE[
/* The current argument. */
-static const char *input = NULL;
+static const char *input;
static int
yylex (void)
{
- /* No token stands for end of file. */
- if (input && *input)
- return *input++;
- else
- return 0;
+ static size_t toknum;
+ assert (toknum <= strlen (input));
+ return input[toknum++];
}
%}
int
main (int argc, const char *argv[])
{
- if (argc > 1)
- input = argv[1];
+ input = argc <= 1 ? "" : argv[1];
return yyparse ();
}
]])
+AT_BISON_OPTION_POPDEFS
-# Specify the output files to avoid problems on different file systems.
-AT_CHECK([bison input.y -o input.c])
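+# AT_NONASSOC_AND_EOF_CHECK([BISON-OPTIONS], [CORRECTNESS])
+# ----------------------------------------------------------
+# Generate and compile the parser with the additional BISON-OPTIONS,
+# then check the syntax-error messages for the inputs exercised below.
+# If CORRECTNESS is `correct', the messages must include the
+# expected-token list ", expecting $end"; otherwise no expected tokens
+# are reported (see AT_EXPECTING).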
+m4_pushdef([AT_NONASSOC_AND_EOF_CHECK],
+[AT_BISON_CHECK([$1[ -o input.c input.y]])
AT_COMPILE([input])
+m4_pushdef([AT_EXPECTING], [m4_if($2, [correct], [[, expecting $end]])])
+
AT_PARSER_CHECK([./input '0<0'])
-# FIXME: This is an actual bug, but a new one, in the sense that
-# no one has ever spotted it! The messages are *wrong*: there should
-# be nothing there, it should be expected eof.
AT_PARSER_CHECK([./input '0<0<0'], [1], [],
- [parse error, unexpected '<', expecting '<' or '>'
+ [syntax error, unexpected '<'AT_EXPECTING
])
AT_PARSER_CHECK([./input '0>0'])
AT_PARSER_CHECK([./input '0>0>0'], [1], [],
- [parse error, unexpected '>', expecting '<' or '>'
+ [syntax error, unexpected '>'AT_EXPECTING
])
AT_PARSER_CHECK([./input '0<0>0'], [1], [],
- [parse error, unexpected '>', expecting '<' or '>'
+ [syntax error, unexpected '>'AT_EXPECTING
])
+m4_popdef([AT_EXPECTING])])
+
+# Expected token list is missing.
+AT_NONASSOC_AND_EOF_CHECK([], [[incorrect]])
+
+# We must disable default reductions in inconsistent states in order to
+# have an explicit list of all expected tokens.
+AT_NONASSOC_AND_EOF_CHECK([[-Dlr.default-reductions=consistent]],
+ [[correct]])
+
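+# (The -Dname=value options used here are the command-line equivalent
+# of "%define name value" grammar directives; the %define spelling
+# appears in later test groups.)
+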
+# lr.default-reductions=consistent happens to work for this test case.
+# However, for other grammars, lookahead sets can be merged for
+# different left contexts, so it is still possible to have an incorrect
+# expected list. Canonical LR is almost a general solution (that is, it
+# can fail only when %nonassoc is used), so make sure it gives the same
+# result as above.
+AT_NONASSOC_AND_EOF_CHECK([[-Dlr.type=canonical-lr]], [[correct]])
+
+# parse.lac=full is a completely general solution that does not require
+# any of the above sacrifices. Of course, it does not extend the
+# language-recognition power of LALR to (IE)LR, but it does ensure that
+# the reported list of expected tokens matches what the given parser
+# would have accepted in place of the unexpected token.
+AT_NONASSOC_AND_EOF_CHECK([[-Dparse.lac=full]], [[correct]])
+
+m4_popdef([AT_NONASSOC_AND_EOF_CHECK])
+
+AT_CLEANUP
+
+
+
+## -------------------------------------- ##
+## %error-verbose and consistent errors. ##
+## -------------------------------------- ##
+
+AT_SETUP([[%error-verbose and consistent errors]])
+
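+# AT_CONSISTENT_ERRORS_CHECK([DECLARATIONS], [RULES], [INPUT],
+#                            [UNEXPECTED-TOKEN], [EXPECTED-TOKENS])
+# ------------------------------------------------------------------
+# Generate a parser for the grammar made of DECLARATIONS and RULES (in
+# C, C++, or Java depending on DECLARATIONS), run it on the string
+# INPUT, and check that the resulting syntax error reports
+# UNEXPECTED-TOKEN.  EXPECTED-TOKENS is `a', `b', `ab', or `none' and
+# selects the ", expecting ..." suffix that must follow it (see
+# AT_EXPECTING).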
+m4_pushdef([AT_CONSISTENT_ERRORS_CHECK], [
+
+AT_BISON_OPTION_PUSHDEFS([$1])
+
+m4_pushdef([AT_YYLEX_PROTOTYPE],
+[AT_SKEL_CC_IF([[int yylex (yy::parser::semantic_type *lvalp)]],
+ [[int yylex (YYSTYPE *lvalp)]])])
+
+AT_SKEL_JAVA_IF([AT_DATA], [AT_DATA_GRAMMAR])([input.y],
+[AT_SKEL_JAVA_IF([[
+
+%code imports {
+ import java.io.IOException;
+}]], [[
+
+%code {]AT_SKEL_CC_IF([[
+ #include <cassert>
+ #include <string>]], [[
+ #include <assert.h>
+ #include <stdio.h>
+ ]AT_YYERROR_DECLARE])[
+ ]AT_YYLEX_PROTOTYPE[;
+ #define USE(Var)
+}
+
+]AT_SKEL_CC_IF([[%defines]], [[%define api.pure]])])[
+
+]$1[
+
+%error-verbose
+
+%%
+
+]$2[
+
+]AT_SKEL_JAVA_IF([[%code lexer {]], [[%%]])[
+
+/*--------.
+| yylex. |
+`--------*/]AT_SKEL_JAVA_IF([[
+
+public String input = "]$3[";
+public int index = 0;
+public int yylex ()
+{
+ if (index < input.length ())
+ return input.charAt (index++);
+ else
+ return 0;
+}
+public Object getLVal ()
+{
+ return new Integer(1);
+}]], [[
+
+]AT_YYLEX_PROTOTYPE[
+{
+ static char const *input = "]$3[";
+ *lvalp = 1;
+ return *input++;
+}]])[
+]AT_YYERROR_DEFINE[
+]AT_SKEL_JAVA_IF([[
+};
+
+%%]])[
+
+/*-------.
+| main. |
+`-------*/]AT_SKEL_JAVA_IF([[
+
+class input
+{
+ public static void main (String args[]) throws IOException
+ {
+ YYParser p = new YYParser ();
+ p.parse ();
+ }
+}]], [AT_SKEL_CC_IF([[
+
+int
+main (void)
+{
+ yy::parser parser;
+ return parser.parse ();
+}]], [[
+
+int
+main (void)
+{
+ return yyparse ();
+}]])])[
+]])
+
+AT_FULL_COMPILE([[input]])
+
+m4_pushdef([AT_EXPECTING], [m4_if($5, [ab], [[, expecting 'a' or 'b']],
+ $5, [a], [[, expecting 'a']],
+ $5, [b], [[, expecting 'b']])])
+
+AT_SKEL_JAVA_IF([AT_JAVA_PARSER_CHECK([[input]], [[0]]],
+ [AT_PARSER_CHECK([[./input]], [[1]]]),
+[[]],
+[[syntax error, unexpected ]$4[]AT_EXPECTING[
+]])
+
+m4_popdef([AT_EXPECTING])
+m4_popdef([AT_YYLEX_PROTOTYPE])
+AT_BISON_OPTION_POPDEFS
+
+])
+
+m4_pushdef([AT_PREVIOUS_STATE_GRAMMAR],
+[[%nonassoc 'a';
+
+start: consistent-error-on-a-a 'a' ;
+
+consistent-error-on-a-a:
+ 'a' default-reduction
+ | 'a' default-reduction 'a'
+ | 'a' shift
+ ;
+
+default-reduction: /*empty*/ ;
+shift: 'b' ;
+
+// Provide another context in which all rules are useful so that this
+// test case looks a little more realistic.
+start: 'b' consistent-error-on-a-a 'c' ;
+]])
+
+m4_pushdef([AT_PREVIOUS_STATE_INPUT], [[a]])
+
+# Unfortunately, no expected tokens are reported even though 'b' can be
+# accepted. Nevertheless, the main point of this test is to make sure
+# that at least the unexpected token is reported. In a previous version
+# of Bison, it wasn't reported because the error is detected in a
+# consistent state with an error action, and that case always triggered
+# the simple "syntax error" message.
+#
+# The point isn't to test IELR here, but state merging happens to
+# complicate this example.
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.type ielr]],
+ [AT_PREVIOUS_STATE_GRAMMAR],
+ [AT_PREVIOUS_STATE_INPUT],
+ [[$end]], [[none]])
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.type ielr
+ %glr-parser]],
+ [AT_PREVIOUS_STATE_GRAMMAR],
+ [AT_PREVIOUS_STATE_INPUT],
+ [[$end]], [[none]])
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.type ielr
+ %language "c++"]],
+ [AT_PREVIOUS_STATE_GRAMMAR],
+ [AT_PREVIOUS_STATE_INPUT],
+ [[$end]], [[none]])
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.type ielr
+ %language "java"]],
+ [AT_PREVIOUS_STATE_GRAMMAR],
+ [AT_PREVIOUS_STATE_INPUT],
+ [[end of input]], [[none]])
+
+# Even canonical LR doesn't foresee the error for 'a'!
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.type ielr
+ %define lr.default-reductions consistent]],
+ [AT_PREVIOUS_STATE_GRAMMAR],
+ [AT_PREVIOUS_STATE_INPUT],
+ [[$end]], [[ab]])
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.type ielr
+ %define lr.default-reductions accepting]],
+ [AT_PREVIOUS_STATE_GRAMMAR],
+ [AT_PREVIOUS_STATE_INPUT],
+ [[$end]], [[ab]])
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.type canonical-lr]],
+ [AT_PREVIOUS_STATE_GRAMMAR],
+ [AT_PREVIOUS_STATE_INPUT],
+ [[$end]], [[ab]])
+
+# Only LAC gets it right.
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.type canonical-lr
+ %define parse.lac full]],
+ [AT_PREVIOUS_STATE_GRAMMAR],
+ [AT_PREVIOUS_STATE_INPUT],
+ [[$end]], [[b]])
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.type ielr
+ %define parse.lac full]],
+ [AT_PREVIOUS_STATE_GRAMMAR],
+ [AT_PREVIOUS_STATE_INPUT],
+ [[$end]], [[b]])
+
+m4_popdef([AT_PREVIOUS_STATE_GRAMMAR])
+m4_popdef([AT_PREVIOUS_STATE_INPUT])
+
+m4_pushdef([AT_USER_ACTION_GRAMMAR],
+[[%nonassoc 'a';
+
+// If $$ = 0 here, then we know that the 'a' destructor is being invoked
+// incorrectly for the 'b' set in the semantic action below. All 'a'
+// tokens are returned by yylex, which sets $$ = 1.
+%destructor {
+ if (!$$)
+ fprintf (stderr, "Wrong destructor.\n");
+} 'a';
+
+// Rather than depend on an inconsistent state to induce reading a
+// lookahead as in the previous grammar, just assign the lookahead in a
+// semantic action. That lookahead isn't needed before either error
+// action is encountered. In a previous version of Bison, this was a
+// problem as it meant yychar was not translated into yytoken before
+// either error action. The second error action thus invoked a
+// destructor that it selected according to the incorrect yytoken. The
+// first error action would have reported an incorrect unexpected token
+// except that, due to the bug described in the previous grammar, the
+// unexpected token was not reported at all.
+start: error-reduce consistent-error 'a' { USE ($][3); } ;
+
+error-reduce:
+ 'a' 'a' consistent-reduction consistent-error 'a'
+ { USE (($][1, $][2, $][5)); }
+| 'a' error
+ { USE ($][1); }
+;
+
+consistent-reduction: /*empty*/ {
+ assert (yychar == ]AT_SKEL_CC_IF([[yyempty_]], [[YYEMPTY]])[);
+ yylval = 0;
+ yychar = 'b';
+} ;
+
+consistent-error:
+ 'a' { USE ($][1); }
+| /*empty*/ %prec 'a'
+;
+
+// Provide another context in which all rules are useful so that this
+// test case looks a little more realistic.
+start: 'b' consistent-error 'b' ;
+]])
+m4_pushdef([AT_USER_ACTION_INPUT], [[aa]])
+
+AT_CONSISTENT_ERRORS_CHECK([[]],
+ [AT_USER_ACTION_GRAMMAR],
+ [AT_USER_ACTION_INPUT],
+ [['b']], [[none]])
+AT_CONSISTENT_ERRORS_CHECK([[%glr-parser]],
+ [AT_USER_ACTION_GRAMMAR],
+ [AT_USER_ACTION_INPUT],
+ [['b']], [[none]])
+AT_CONSISTENT_ERRORS_CHECK([[%language "c++"]],
+ [AT_USER_ACTION_GRAMMAR],
+ [AT_USER_ACTION_INPUT],
+ [['b']], [[none]])
+# No Java test because yychar cannot be manipulated by users.
+
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.default-reductions consistent]],
+ [AT_USER_ACTION_GRAMMAR],
+ [AT_USER_ACTION_INPUT],
+ [['b']], [[none]])
+
+# Canonical LR doesn't foresee the error for 'a'!
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.default-reductions accepting]],
+ [AT_USER_ACTION_GRAMMAR],
+ [AT_USER_ACTION_INPUT],
+ [[$end]], [[a]])
+AT_CONSISTENT_ERRORS_CHECK([[%define lr.type canonical-lr]],
+ [AT_USER_ACTION_GRAMMAR],
+ [AT_USER_ACTION_INPUT],
+ [[$end]], [[a]])
+
+AT_CONSISTENT_ERRORS_CHECK([[%define parse.lac full]],
+ [AT_USER_ACTION_GRAMMAR],
+ [AT_USER_ACTION_INPUT],
+ [['b']], [[none]])
+AT_CONSISTENT_ERRORS_CHECK([[%define parse.lac full
+ %define lr.default-reductions accepting]],
+ [AT_USER_ACTION_GRAMMAR],
+ [AT_USER_ACTION_INPUT],
+ [[$end]], [[none]])
+
+m4_popdef([AT_USER_ACTION_GRAMMAR])
+m4_popdef([AT_USER_ACTION_INPUT])
+
+m4_popdef([AT_CONSISTENT_ERRORS_CHECK])
+
AT_CLEANUP
+## ------------------------------------------------------- ##
+## LAC: %nonassoc requires splitting canonical LR states. ##
+## ------------------------------------------------------- ##
+
+# This test case demonstrates that, when %nonassoc is used, canonical
+# LR(1) parser table construction followed by conflict resolution
+# without further state splitting is not always sufficient to produce a
+# parser that can detect all syntax errors as soon as possible on one
+# token of lookahead. However, LAC solves the problem completely even
+# with minimal LR parser tables.
+
+AT_SETUP([[LAC: %nonassoc requires splitting canonical LR states]])
+AT_BISON_OPTION_PUSHDEFS
+AT_DATA_GRAMMAR([[input.y]],
+[[%code {
+ #include <stdio.h>
+ ]AT_YYERROR_DECLARE[
+ ]AT_YYLEX_DECLARE[
+}
+
+%error-verbose
+%nonassoc 'a'
+
+%%
+
+start:
+ 'a' problem 'a' // First context.
+| 'b' problem 'b' // Second context.
+| 'c' reduce-nonassoc // Just makes reduce-nonassoc useful.
+;
+
+problem:
+ look reduce-nonassoc
+| look 'a'
+| look 'b'
+;
+
+// For the state reached after shifting the 'a' in these productions,
+// lookahead sets are the same in both the first and second contexts.
+// Thus, canonical LR reuses the same state for both contexts. However,
+// the lookahead 'a' for the reduction "look: 'a'" later becomes an
+// error action only in the first context. In order to immediately
+// detect the syntax error on 'a' here for only the first context, this
+// canonical LR state would have to be split into two states, and the
+// 'a' lookahead would have to be removed from only one of the states.
+look:
+ 'a' // Reduction lookahead set is always ['a', 'b'].
+| 'a' 'b'
+| 'a' 'c' // 'c' is forgotten as an expected token.
+;
+
+reduce-nonassoc: %prec 'a';
+
+%%
+]AT_YYERROR_DEFINE[
+]AT_YYLEX_DEFINE([aaa])[
+
+int
+main (void)
+{
+ return yyparse ();
+}
+]])
+AT_BISON_OPTION_POPDEFS
+
+# Show canonical LR's failure.
+AT_BISON_CHECK([[-Dlr.type=canonical-lr -o input.c input.y]],
+ [[0]], [[]],
+[[input.y: conflicts: 2 shift/reduce
+]])
+AT_COMPILE([[input]])
+AT_PARSER_CHECK([[./input]], [[1]], [[]],
+[[syntax error, unexpected 'a', expecting 'b'
+]])
+
+# It's corrected by LAC.
+AT_BISON_CHECK([[-Dlr.type=canonical-lr -Dparse.lac=full \
+ -o input.c input.y]], [[0]], [[]],
+[[input.y: conflicts: 2 shift/reduce
+]])
+AT_COMPILE([[input]])
+AT_PARSER_CHECK([[./input]], [[1]], [[]],
+[[syntax error, unexpected 'a', expecting 'b' or 'c'
+]])
+
+# IELR is sufficient when LAC is used.
+AT_BISON_CHECK([[-Dlr.type=ielr -Dparse.lac=full -o input.c input.y]],
+ [[0]], [[]],
+[[input.y: conflicts: 2 shift/reduce
+]])
+AT_COMPILE([[input]])
+AT_PARSER_CHECK([[./input]], [[1]], [[]],
+[[syntax error, unexpected 'a', expecting 'b' or 'c'
+]])
+
+AT_CLEANUP
+
## ------------------------- ##
## Unresolved SR Conflicts. ##
## ------------------------- ##
exp: exp OP exp | NUM;
]])
-AT_CHECK([bison input.y -o input.c --report=all], 0, [],
-[input.y: warning: 1 shift/reduce conflict
+AT_BISON_CHECK([-o input.c --report=all input.y], 0, [],
+[input.y: conflicts: 1 shift/reduce
])
# Check the contents of the report.
AT_CHECK([cat input.output], [],
-[[State 5 contains 1 shift/reduce conflict.
+[[State 5 conflicts: 1 shift/reduce
Grammar
state 5
- 1 exp: exp . OP exp [$end, OP]
+ 1 exp: exp . OP exp
1 | exp OP exp . [$end, OP]
OP shift, and go to state 4
exp: exp OP exp | NUM;
]])
-AT_CHECK([bison input.y -o input.c --report=all])
+AT_BISON_CHECK([-o input.c --report=all input.y])
# Check the contents of the report.
AT_CHECK([cat input.output], [],
state 5
- 1 exp: exp . OP exp [$end, OP]
+ 1 exp: exp . OP exp
1 | exp OP exp . [$end, OP]
$default reduce using rule 1 (exp)
+
Conflict between rule 1 and token OP resolved as reduce (%left OP).
]])
%%
]])
-AT_CHECK([bison input.y -o input.c --report=all], 0, [],
-[[input.y: warning: 1 reduce/reduce conflict
-input.y:4.4-8: warning: rule never reduced because of conflicts: id: '0'
+AT_BISON_CHECK([-o input.c --report=all input.y], 0, [],
+[[input.y: conflicts: 1 reduce/reduce
+input.y:4.6-8: warning: rule useless in parser due to conflicts: id: '0'
]])
# Check the contents of the report.
AT_CHECK([cat input.output], [],
-[[Rules never reduced
+[[Rules useless in parser due to conflicts
4 id: '0'
-State 1 contains 1 reduce/reduce conflict.
+State 1 conflicts: 1 reduce/reduce
Grammar
exp: exp OP exp | NUM;
]])
-AT_CHECK([bison input.y -o input.c], 1, [],
-[input.y: warning: 1 shift/reduce conflict
+AT_BISON_CHECK([-o input.c input.y], 1, [],
+[input.y: conflicts: 1 shift/reduce
input.y: expected 0 shift/reduce conflicts
])
AT_CLEANUP
exp: exp OP exp | NUM;
]])
-AT_CHECK([bison input.y -o input.c])
+AT_BISON_CHECK([-o input.c input.y])
AT_CLEANUP
exp: exp OP exp | NUM;
]])
-AT_CHECK([bison input.y -o input.c], 1, [],
-[input.y: warning: 1 shift/reduce conflict
+AT_BISON_CHECK([-o input.c input.y], 1, [],
+[input.y: conflicts: 1 shift/reduce
input.y: expected 2 shift/reduce conflicts
])
AT_CLEANUP
+
+
+## ------------------------------- ##
+## %expect with reduce conflicts. ##
+## ------------------------------- ##
+
+AT_SETUP([%expect with reduce conflicts])
+
+AT_DATA([input.y],
+[[%expect 0
+%%
+program: a 'a' | a a;
+a: 'a';
+]])
+
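+# This grammar is ambiguous on "aa": once the second 'a' has been
+# shifted, the parser cannot decide on $end whether to reduce it to a
+# (completing "program: a a") or to reduce "program: a 'a'" directly,
+# so there is one reduce/reduce conflict and bison must fail below.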
+AT_BISON_CHECK([-o input.c input.y], 1, [],
+[input.y: conflicts: 1 reduce/reduce
+input.y: expected 0 reduce/reduce conflicts
+])
+AT_CLEANUP
+
+
+## ------------------------- ##
+## %prec with user strings. ##
+## ------------------------- ##
+
+AT_SETUP([%prec with user string])
+
+AT_DATA([[input.y]],
+[[%%
+exp:
+ "foo" %prec "foo"
+;
+]])
+
+AT_BISON_CHECK([-o input.c input.y])
+AT_CLEANUP
+
+
+## -------------------------------- ##
+## %no-default-prec without %prec. ##
+## -------------------------------- ##
+
+AT_SETUP([%no-default-prec without %prec])
+
+AT_DATA([[input.y]],
+[[%left '+'
+%left '*'
+
+%%
+
+%no-default-prec;
+
+e: e '+' e
+ | e '*' e
+ | '0'
+ ;
+]])
+
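+# Without default precedence, the rules "e: e '+' e" and "e: e '*' e"
+# carry no precedence, so neither reduction can be resolved against
+# shifting '+' or '*': two conflicting lookaheads in each of the two
+# states where those rules can be reduced, hence 4 shift/reduce
+# conflicts.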
+AT_BISON_CHECK([-o input.c input.y], 0, [],
+[[input.y: conflicts: 4 shift/reduce
+]])
+AT_CLEANUP
+
+
+## ----------------------------- ##
+## %no-default-prec with %prec. ##
+## ----------------------------- ##
+
+AT_SETUP([%no-default-prec with %prec])
+
+AT_DATA([[input.y]],
+[[%left '+'
+%left '*'
+
+%%
+
+%no-default-prec;
+
+e: e '+' e %prec '+'
+ | e '*' e %prec '*'
+ | '0'
+ ;
+]])
+
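+# Here each rule regains its operator's precedence through an explicit
+# %prec, so the four conflicts of the previous grammar are resolved and
+# bison must report nothing.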
+AT_BISON_CHECK([-o input.c input.y])
+AT_CLEANUP
+
+
+## --------------- ##
+## %default-prec. ##
+## --------------- ##
+
+AT_SETUP([%default-prec])
+
+AT_DATA([[input.y]],
+[[%left '+'
+%left '*'
+
+%%
+
+%default-prec;
+
+e: e '+' e
+ | e '*' e
+ | '0'
+ ;
+]])
+
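+# %default-prec restores the default behavior: each rule takes its
+# precedence from its last terminal, so the conflicts are again
+# resolved silently.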
+AT_BISON_CHECK([-o input.c input.y])
+AT_CLEANUP
+
+
+## ---------------------------------------------- ##
+## Unreachable States After Conflict Resolution. ##
+## ---------------------------------------------- ##
+
+AT_SETUP([[Unreachable States After Conflict Resolution]])
+
+# If conflict resolution makes states unreachable, remove those states, report
+# rules that are then unused, and don't report conflicts in those states. Test
+# what happens when a nonterminal becomes useless as a result of state removal
+# since that causes lalr.o's goto map to be rewritten.
+
+AT_DATA([[input.y]],
+[[%output "input.c"
+%left 'a'
+
+%%
+
+start: resolved_conflict 'a' reported_conflicts 'a' ;
+
+/* S/R conflict resolved as reduce, so the state with item
+ * (resolved_conflict: 'a' . unreachable1) and all its transition successors are
+ * unreachable, and the associated production is useless. */
+resolved_conflict:
+ 'a' unreachable1
+ | %prec 'a'
+ ;
+
+/* S/R conflict that need not be reported since it is unreachable because of
+ * the previous conflict resolution. Nonterminal unreachable1 and all its
+ * productions are useless. */
+unreachable1:
+ 'a' unreachable2
+ |
+ ;
+
+/* Likewise for a R/R conflict and nonterminal unreachable2. */
+unreachable2: | ;
+
+/* Make sure remaining S/R and R/R conflicts are still reported correctly even
+ * when their states are renumbered due to state removal. */
+reported_conflicts:
+ 'a'
+ | 'a'
+ |
+ ;
+
+]])
+
+AT_BISON_CHECK([[--report=all input.y]], 0, [],
+[[input.y: conflicts: 1 shift/reduce, 1 reduce/reduce
+input.y:12.5-20: warning: rule useless in parser due to conflicts: resolved_conflict: 'a' unreachable1
+input.y:20.5-20: warning: rule useless in parser due to conflicts: unreachable1: 'a' unreachable2
+input.y:21.4: warning: rule useless in parser due to conflicts: unreachable1: /* empty */
+input.y:25.13: warning: rule useless in parser due to conflicts: unreachable2: /* empty */
+input.y:25.16: warning: rule useless in parser due to conflicts: unreachable2: /* empty */
+input.y:31.5-7: warning: rule useless in parser due to conflicts: reported_conflicts: 'a'
+input.y:32.4: warning: rule useless in parser due to conflicts: reported_conflicts: /* empty */
+]])
+
+AT_CHECK([[cat input.output]], 0,
+[[Rules useless in parser due to conflicts
+
+ 2 resolved_conflict: 'a' unreachable1
+
+ 4 unreachable1: 'a' unreachable2
+ 5 | /* empty */
+
+ 6 unreachable2: /* empty */
+ 7 | /* empty */
+
+ 9 reported_conflicts: 'a'
+ 10 | /* empty */
+
+
+State 4 conflicts: 1 shift/reduce
+State 5 conflicts: 1 reduce/reduce
+
+
+Grammar
+
+ 0 $accept: start $end
+
+ 1 start: resolved_conflict 'a' reported_conflicts 'a'
+
+ 2 resolved_conflict: 'a' unreachable1
+ 3 | /* empty */
+
+ 4 unreachable1: 'a' unreachable2
+ 5 | /* empty */
+
+ 6 unreachable2: /* empty */
+ 7 | /* empty */
+
+ 8 reported_conflicts: 'a'
+ 9 | 'a'
+ 10 | /* empty */
+
+
+Terminals, with rules where they appear
+
+$end (0) 0
+'a' (97) 1 2 4 8 9
+error (256)
+
+
+Nonterminals, with rules where they appear
+
+$accept (4)
+ on left: 0
+start (5)
+ on left: 1, on right: 0
+resolved_conflict (6)
+ on left: 2 3, on right: 1
+unreachable1 (7)
+ on left: 4 5, on right: 2
+unreachable2 (8)
+ on left: 6 7, on right: 4
+reported_conflicts (9)
+ on left: 8 9 10, on right: 1
+
+
+state 0
+
+ 0 $accept: . start $end
+ 1 start: . resolved_conflict 'a' reported_conflicts 'a'
+ 2 resolved_conflict: . 'a' unreachable1
+ 3 | . ['a']
+
+ $default reduce using rule 3 (resolved_conflict)
+
+ start go to state 1
+ resolved_conflict go to state 2
+
+ Conflict between rule 3 and token 'a' resolved as reduce (%left 'a').
+
+
+state 1
+
+ 0 $accept: start . $end
+
+ $end shift, and go to state 3
+
+
+state 2
+
+ 1 start: resolved_conflict . 'a' reported_conflicts 'a'
+
+ 'a' shift, and go to state 4
+
+
+state 3
+
+ 0 $accept: start $end .
+
+ $default accept
+
+
+state 4
+
+ 1 start: resolved_conflict 'a' . reported_conflicts 'a'
+ 8 reported_conflicts: . 'a'
+ 9 | . 'a'
+ 10 | . ['a']
+
+ 'a' shift, and go to state 5
+
+ 'a' [reduce using rule 10 (reported_conflicts)]
+
+ reported_conflicts go to state 6
+
+
+state 5
+
+ 8 reported_conflicts: 'a' . ['a']
+ 9 | 'a' . ['a']
+
+ 'a' reduce using rule 8 (reported_conflicts)
+ 'a' [reduce using rule 9 (reported_conflicts)]
+ $default reduce using rule 8 (reported_conflicts)
+
+
+state 6
+
+ 1 start: resolved_conflict 'a' reported_conflicts . 'a'
+
+ 'a' shift, and go to state 7
+
+
+state 7
+
+ 1 start: resolved_conflict 'a' reported_conflicts 'a' .
+
+ $default reduce using rule 1 (start)
+]])
+
+AT_DATA([[input-keep.y]],
+[[%define lr.keep-unreachable-states
+]])
+AT_CHECK([[cat input.y >> input-keep.y]])
+
+AT_BISON_CHECK([[input-keep.y]], 0, [],
+[[input-keep.y: conflicts: 2 shift/reduce, 2 reduce/reduce
+input-keep.y:22.4: warning: rule useless in parser due to conflicts: unreachable1: /* empty */
+input-keep.y:26.16: warning: rule useless in parser due to conflicts: unreachable2: /* empty */
+input-keep.y:32.5-7: warning: rule useless in parser due to conflicts: reported_conflicts: 'a'
+input-keep.y:33.4: warning: rule useless in parser due to conflicts: reported_conflicts: /* empty */
+]])
+
+AT_CLEANUP
+
+
+## ------------------------------------------------------------ ##
+## Solved conflicts report for multiple reductions in a state. ##
+## ------------------------------------------------------------ ##
+
+AT_SETUP([[Solved conflicts report for multiple reductions in a state]])
+
+# Bison used to lose earlier solved-conflict messages, even within a
+# single S/R/R conflict.
+
+AT_DATA([[input.y]],
+[[%left 'a'
+%right 'b'
+%right 'c'
+%right 'd'
+%%
+start:
+ 'a'
+ | empty_a 'a'
+ | 'b'
+ | empty_b 'b'
+ | 'c'
+ | empty_c1 'c'
+ | empty_c2 'c'
+ | empty_c3 'c'
+ ;
+empty_a: %prec 'a' ;
+empty_b: %prec 'b' ;
+empty_c1: %prec 'c' ;
+empty_c2: %prec 'c' ;
+empty_c3: %prec 'd' ;
+]])
+AT_BISON_CHECK([[--report=all -o input.c input.y]], 0, [], [ignore])
+AT_CHECK([[cat input.output | sed -n '/^state 0$/,/^state 1$/p']], 0,
+[[state 0
+
+ 0 $accept: . start $end
+ 1 start: . 'a'
+ 2 | . empty_a 'a'
+ 3 | . 'b'
+ 4 | . empty_b 'b'
+ 5 | . 'c'
+ 6 | . empty_c1 'c'
+ 7 | . empty_c2 'c'
+ 8 | . empty_c3 'c'
+ 9 empty_a: . ['a']
+ 10 empty_b: . []
+ 11 empty_c1: . []
+ 12 empty_c2: . []
+ 13 empty_c3: . ['c']
+
+ 'b' shift, and go to state 1
+
+ 'c' reduce using rule 13 (empty_c3)
+ $default reduce using rule 9 (empty_a)
+
+ start go to state 2
+ empty_a go to state 3
+ empty_b go to state 4
+ empty_c1 go to state 5
+ empty_c2 go to state 6
+ empty_c3 go to state 7
+
+ Conflict between rule 9 and token 'a' resolved as reduce (%left 'a').
+ Conflict between rule 10 and token 'b' resolved as shift (%right 'b').
+ Conflict between rule 11 and token 'c' resolved as shift (%right 'c').
+ Conflict between rule 12 and token 'c' resolved as shift (%right 'c').
+ Conflict between rule 13 and token 'c' resolved as reduce ('c' < 'd').
+
+
+state 1
+]])
+
+AT_CLEANUP
+
+
+## ------------------------------------------------------------ ##
+## %nonassoc error actions for multiple reductions in a state. ##
+## ------------------------------------------------------------ ##
+
+# Bison used to abort when trying to resolve conflicts as %nonassoc error
+# actions for multiple reductions in a state.
+
+# For a token whose conflict %nonassoc resolved as an error action, Bison
+# used to print the first remaining reduction on that token without
+# brackets.
+
+AT_SETUP([[%nonassoc error actions for multiple reductions in a state]])
+
+AT_DATA([[input.y]],
+[[%nonassoc 'a' 'b' 'c'
+%%
+start:
+ 'a'
+ | empty_a 'a'
+ | 'b'
+ | empty_b 'b'
+ | 'c'
+ | empty_c1 'c'
+ | empty_c2 'c'
+ | empty_c3 'c'
+ ;
+empty_a: %prec 'a' ;
+empty_b: %prec 'b' ;
+empty_c1: %prec 'c' ;
+empty_c2: %prec 'c' ;
+empty_c3: %prec 'c' ;
+]])
+
+AT_BISON_CHECK([[--report=all -o input.c input.y]], 0, [], [ignore])
+AT_CHECK([[cat input.output | sed -n '/^state 0$/,/^state 1$/p']], 0,
+[[state 0
+
+ 0 $accept: . start $end
+ 1 start: . 'a'
+ 2 | . empty_a 'a'
+ 3 | . 'b'
+ 4 | . empty_b 'b'
+ 5 | . 'c'
+ 6 | . empty_c1 'c'
+ 7 | . empty_c2 'c'
+ 8 | . empty_c3 'c'
+ 9 empty_a: . []
+ 10 empty_b: . []
+ 11 empty_c1: . []
+ 12 empty_c2: . ['c']
+ 13 empty_c3: . ['c']
+
+ 'a' error (nonassociative)
+ 'b' error (nonassociative)
+ 'c' error (nonassociative)
+
+ 'c' [reduce using rule 12 (empty_c2)]
+ 'c' [reduce using rule 13 (empty_c3)]
+
+ start go to state 1
+ empty_a go to state 2
+ empty_b go to state 3
+ empty_c1 go to state 4
+ empty_c2 go to state 5
+ empty_c3 go to state 6
+
+ Conflict between rule 9 and token 'a' resolved as an error (%nonassoc 'a').
+ Conflict between rule 10 and token 'b' resolved as an error (%nonassoc 'b').
+ Conflict between rule 11 and token 'c' resolved as an error (%nonassoc 'c').
+
+
+state 1
+]])
+AT_CLEANUP
+
+
+## --------------------------------- ##
+## -W versus %expect and %expect-rr ##
+## --------------------------------- ##
+
+AT_SETUP([[-W versus %expect and %expect-rr]])
+
+AT_DATA([[sr-rr.y]],
+[[%glr-parser
+%%
+start: 'a' | A 'a' | B 'a' ;
+A: ;
+B: ;
+]])
+AT_DATA([[sr.y]],
+[[%glr-parser
+%%
+start: 'a' | A 'a' ;
+A: ;
+]])
+AT_DATA([[rr.y]],
+[[%glr-parser
+%%
+start: A | B ;
+A: ;
+B: ;
+]])
+
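+# Where the conflicts come from: in sr.y, the initial state can either
+# shift 'a' (start: 'a') or reduce the empty A (start: A 'a') on
+# lookahead 'a'; in rr.y, it can reduce either empty A or empty B on
+# $end; sr-rr.y combines both on the 'a' lookahead, counted as one
+# shift/reduce plus one reduce/reduce conflict.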
+AT_BISON_CHECK([[sr-rr.y]], [[0]], [[]],
+[[sr-rr.y: conflicts: 1 shift/reduce, 1 reduce/reduce
+]])
+AT_BISON_CHECK([[-Wno-conflicts-sr sr-rr.y]], [[0]], [[]],
+[[sr-rr.y: conflicts: 1 reduce/reduce
+]])
+AT_BISON_CHECK([[-Wno-conflicts-rr sr-rr.y]], [[0]], [[]],
+[[sr-rr.y: conflicts: 1 shift/reduce
+]])
+
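+# Exercise every combination of the three grammars with %expect and/or
+# %expect-rr prepended.  When the declared counts match the actual
+# conflicts, bison must succeed silently even under -Werror; when they
+# do not, it must fail with the corresponding diagnostics even under
+# -Wnone.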
+[for gram in sr-rr sr rr; do
+ for sr_exp_i in '' 0 1 2; do
+ for rr_exp_i in '' 0 1 2; do
+ test -z "$sr_exp_i" && test -z "$rr_exp_i" && continue
+
+ # Build grammar file.
+ sr_exp=0
+ rr_exp=0
+ file=$gram
+ directives=
+ if test -n "$sr_exp_i"; then
+ sr_exp=$sr_exp_i
+ file=$file-expect-$sr_exp
+ directives="%expect $sr_exp"
+ fi
+ if test -n "$rr_exp_i"; then
+ rr_exp=$rr_exp_i
+ file=$file-expect-rr-$rr_exp
+ directives="$directives %expect-rr $rr_exp"
+ fi
+ file=$file.y
+ echo "$directives" > $file
+ cat $gram.y >> $file
+
+ # Count actual conflicts.
+ conflicts=
+ sr_count=0
+ rr_count=0
+ if test $gram = sr || test $gram = sr-rr; then
+ conflicts="1 shift/reduce"
+ sr_count=1
+ fi
+ if test $gram = rr || test $gram = sr-rr; then
+ if test -n "$conflicts"; then
+ conflicts="$conflicts, "
+ fi
+ conflicts="${conflicts}1 reduce/reduce"
+ rr_count=1
+ fi
+
+ # Run tests.
+ if test $sr_count -eq $sr_exp && test $rr_count -eq $rr_exp; then
+ ]AT_BISON_CHECK([[-Wnone $file]])[
+ ]AT_BISON_CHECK([[-Werror $file]])[
+ else
+ echo "$file: conflicts: $conflicts" > experr
+ if test $sr_count -ne $sr_exp; then
+ if test $sr_exp -ne 1; then s=s; else s= ; fi
+ echo "$file: expected $sr_exp shift/reduce conflict$s" >> experr
+ fi
+ if test $rr_count -ne $rr_exp; then
+ if test $rr_exp -ne 1; then s=s; else s= ; fi
+ echo "$file: expected $rr_exp reduce/reduce conflict$s" >> experr
+ fi
+ ]AT_BISON_CHECK([[-Wnone $file]], [[1]], [[]], [[experr]])[
+ ]AT_BISON_CHECK([[-Werror $file]], [[1]], [[]], [[experr]])[
+ fi
+ done
+ done
+done]
+
+AT_CLEANUP