/* IELR's inadequacy annotation list.

   Copyright (C) 2009-2014 Free Software Foundation, Inc.

   This file is part of Bison, the GNU Compiler Compiler.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include <config.h>
#include "system.h"

#include "AnnotationList.h"
#include "lalr.h"
#include "ielr.h"
/**
 * \pre
 *   - <tt>annotations_obstackp != NULL</tt>.
 * \post
 *   - \c result is a new \c AnnotationList with one node whose:
 *     - \c inadequacyNode member is \c NULL.
 *     - \c contributions member is allocated with \c contribution_count
 *       uninitialized elements.
 *   - All memory was allocated on \c annotations_obstackp.
 */
static AnnotationList*
AnnotationList__alloc_on_obstack (ContributionIndex contribution_count,
                                  struct obstack *annotations_obstackp)
{
  AnnotationList *result;
  size_t contributions_size =
    contribution_count * sizeof result->contributions[0];
  result = obstack_alloc (annotations_obstackp,
                          offsetof (AnnotationList, contributions)
                          + contributions_size);
  result->next = NULL;
  result->inadequacyNode = NULL;
  return result;
}
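
/* The allocation above sizes the node with offsetof rather than sizeof so
   that the single obstack block ends with a contributions array of exactly
   contribution_count elements; those elements are deliberately left
   uninitialized for the caller to fill.  */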
/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self represents an
 *     "always" contribution.
 */
static bool
AnnotationList__isContributionAlways (AnnotationList const *self,
                                      ContributionIndex ci)
{
  aver (0 <= ci && ci < self->inadequacyNode->contributionCount);
  return self->contributions[ci] == NULL;
}
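
/* Encoding note: a NULL entry in the contributions array means the
   contribution is made "always", so no kernel-item bitset is stored for it;
   only potential contributions carry an Sbitset of kernel items.  */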
/**
 * \pre
 *   - \c self is a single node.
 *   - \c self annotates the same state as every other node in \c list, and
 *     that state has \c nitems kernel items.
 * \post
 *   - If the list \c list already contains an identical annotation to \c self,
 *     \c self was discarded, \c result is false, and the caller is responsible
 *     for the memory of \c self.
 *   - Otherwise, \c list now contains the node \c self, \c result is true, and
 *     \c list assumes responsibility for the memory of \c self.
 *   - The sort in \c list is:
 *     - Sort in reverse order on the unique ID of the associated
 *       inadequacy node.  Because these IDs are assigned in ascending
 *       order, this should mean that the insertion position within an
 *       annotation list is usually near the beginning with other
 *       annotations associated with the same inadequacy.
 *     - Next, sort on the first contribution that is different as follows:
 *       - Sort an always-contribution before a never-contribution before a
 *         potential-contribution.
 *       - Two always-contributions are identical.
 *       - Two never-contributions are identical.
 *       - For two potential-contributions, sort on the contributions' kernel
 *         item bitsets interpreted as binary numbers.
 *     - The sorting has a few effects:
 *       - It accelerates elimination of identical annotations during insertion.
 *       - It determines how the output of \c AnnotationList__debug is sorted.
 *       - Other than that, it's probably not important.
 */
static bool
AnnotationList__insertInto (AnnotationList *self, AnnotationList **list,
                            size_t nitems)
{
  AnnotationList **node;
  for (node = list; *node; node = &(*node)->next)
    {
      int cmp = 0;
      ContributionIndex ci;
      if (self->inadequacyNode->id < (*node)->inadequacyNode->id)
        cmp = 1;
      else if ((*node)->inadequacyNode->id < self->inadequacyNode->id)
        cmp = -1;
      else
        for (ci = 0;
             cmp == 0 && ci < self->inadequacyNode->contributionCount;
             ++ci)
          {
            if (AnnotationList__isContributionAlways (self, ci))
              {
                if (!AnnotationList__isContributionAlways (*node, ci))
                  cmp = -1;
              }
            else if (AnnotationList__isContributionAlways (*node, ci))
              cmp = 1;
            else
              {
                size_t item;
                for (item = 0; cmp == 0 && item < nitems; ++item)
                  {
                    if (!Sbitset__test (self->contributions[ci], item))
                      {
                        if (Sbitset__test ((*node)->contributions[ci], item))
                          cmp = -1;
                      }
                    else if (!Sbitset__test ((*node)->contributions[ci],
                                             item))
                      cmp = 1;
                  }
              }
          }
      if (cmp < 0)
        {
          self->next = *node;
          *node = self;
          break;
        }
      else if (cmp == 0)
        {
          self = NULL;
          break;
        }
    }
  if (!*node)
    {
      self->next = NULL;
      *node = self;
    }
  return self != NULL;
}
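
/* In the comparison above, cmp < 0 means self sorts before *node and is
   linked in at that position, cmp == 0 means an identical annotation already
   exists so self is discarded, and cmp > 0 means the scan continues; a NULL
   *node after the loop means self belongs at the end of the list.  */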
static bitset
AnnotationList__compute_shift_tokens (transitions *trans)
{
  bitset shift_tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;
  FOR_EACH_SHIFT (trans, i)
    bitset_set (shift_tokens, TRANSITION_SYMBOL (trans, i));
  return shift_tokens;
}
static bitset
AnnotationList__compute_conflicted_tokens (bitset shift_tokens,
                                           reductions *reds)
{
  bitset conflicted_tokens = bitset_create (ntokens, BITSET_FIXED);
  bitset conflicted_tokens_rule = bitset_create (ntokens, BITSET_FIXED);
  bitset tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;

  bitset_copy (tokens, shift_tokens);
  for (i = 0; i < reds->num; ++i)
    {
      bitset_and (conflicted_tokens_rule, tokens, reds->lookahead_tokens[i]);
      bitset_or (conflicted_tokens,
                 conflicted_tokens, conflicted_tokens_rule);
      bitset_or (tokens, tokens, reds->lookahead_tokens[i]);
      /* Check that rules are sorted on rule number or the next step in
         AnnotationList__compute_from_inadequacies will misbehave.  */
      aver (i == 0 || reds->rules[i-1] < reds->rules[i]);
    }

  bitset_free (tokens);
  bitset_free (conflicted_tokens_rule);

  return conflicted_tokens;
}
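
/* The running tokens set starts as the shift tokens and accumulates each
   rule's lookaheads in turn, so a token lands in conflicted_tokens exactly
   when it appears in two actions: a shift and a reduction, or two
   reductions.  */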
static bool
AnnotationList__compute_lhs_contributions (state *s, rule *the_rule,
                                           symbol_number conflicted_token,
                                           bitsetv follow_kernel_items,
                                           bitsetv always_follows,
                                           state ***predecessors,
                                           bitset **item_lookahead_sets,
                                           Sbitset *items,
                                           struct obstack
                                             *annotations_obstackp)
{
  goto_number lhs_goto = map_goto (s->number, the_rule->lhs->number);
  if (bitset_test (always_follows[lhs_goto], conflicted_token))
    return true;
  *items = Sbitset__new_on_obstack (s->nitems, annotations_obstackp);
  {
    bitset_iterator biter_item;
    bitset_bindex item;
    BITSET_FOR_EACH (biter_item, follow_kernel_items[lhs_goto], item, 0)
      if (ielr_item_has_lookahead (s, 0, item, conflicted_token,
                                   predecessors, item_lookahead_sets))
        Sbitset__set (*items, item);
  }
  return false;
}
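
/* Returning true above means conflicted_token is in the goto's
   always-follows set, so the caller records an "always" contribution (a NULL
   bitset) instead of the kernel-item set written to *items on the false
   path.  */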
static void
AnnotationList__computePredecessorAnnotations (AnnotationList *self, state *s,
                                               bitsetv follow_kernel_items,
                                               bitsetv always_follows,
                                               state ***predecessors,
                                               bitset **item_lookahead_sets,
                                               AnnotationList
                                                 **annotation_lists,
                                               AnnotationIndex
                                                 *annotation_counts,
                                               struct obstack
                                                 *annotations_obstackp)
{
  state **predecessor;
  for (predecessor = predecessors[s->number]; *predecessor; ++predecessor)
    {
      AnnotationList *annotation_node =
        AnnotationList__alloc_on_obstack (
          self->inadequacyNode->contributionCount, annotations_obstackp);
      annotation_node->inadequacyNode = self->inadequacyNode;
      bool potential_contribution = false;
      bitset *lookaheads = NULL;
      {
        ContributionIndex ci;
        for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
          {
            symbol_number contribution_token =
              InadequacyList__getContributionToken (self->inadequacyNode, ci)
                ->number;
            if (AnnotationList__isContributionAlways (self, ci))
              {
                annotation_node->contributions[ci] = NULL;
                continue;
              }
            annotation_node->contributions[ci] =
              Sbitset__new_on_obstack ((*predecessor)->nitems,
                                       annotations_obstackp);
            {
              size_t predecessor_item = 0;
              Sbitset sbiter_item;
              Sbitset__Index self_item;
              SBITSET__FOR_EACH (self->contributions[ci], s->nitems,
                                 sbiter_item, self_item)
                {
                  /* If this kernel item is the beginning of a RHS, it must be
                     the kernel item in the start state, and so it has an empty
                     lookahead set.  Thus, it can't contribute to inadequacies,
                     and so it should never have been identified as a
                     contribution.  If, instead, this kernel item is the
                     successor of the start state's kernel item, the lookahead
                     set is still empty, and so it also should never have been
                     identified as a contribution.  This situation is fortunate
                     because we want to avoid the - 2 below in both cases.  */
                  aver (s->items[self_item] > 1);
                  /* If this kernel item is next to the beginning of the RHS,
                     then check all of the predecessor's goto follows for the
                     LHS.  */
                  if (item_number_is_rule_number (ritem[s->items[self_item]
                                                        - 2]))
                    {
                      Sbitset items;
                      unsigned int rulei;
                      for (rulei = s->items[self_item];
                           !item_number_is_rule_number (ritem[rulei]);
                           ++rulei)
                        ;
                      if (AnnotationList__compute_lhs_contributions (
                            *predecessor,
                            &rules[item_number_as_rule_number (ritem[rulei])],
                            contribution_token,
                            follow_kernel_items, always_follows, predecessors,
                            item_lookahead_sets, &items, annotations_obstackp))
                        {
                          obstack_free (annotations_obstackp,
                                        annotation_node->contributions[ci]);
                          annotation_node->contributions[ci] = NULL;
                          break;
                        }
                      else
                        {
                          Sbitset__or (annotation_node->contributions[ci],
                                       annotation_node->contributions[ci],
                                       items, (*predecessor)->nitems);
                          obstack_free (annotations_obstackp, items);
                        }
                    }
                  /* If this kernel item is later in the RHS, then check the
                     predecessor item's lookahead set.  */
                  else
                    {
                      /* We don't have to start the predecessor item search at
                         the beginning every time because items from both
                         states are sorted by their indices in ritem.  */
                      for (;
                           predecessor_item < (*predecessor)->nitems;
                           ++predecessor_item)
                        if ((*predecessor)->items[predecessor_item]
                            == s->items[self_item] - 1)
                          break;
                      aver (predecessor_item != (*predecessor)->nitems);
                      if (ielr_item_has_lookahead (*predecessor, 0,
                                                   predecessor_item,
                                                   contribution_token,
                                                   predecessors,
                                                   item_lookahead_sets))
                        Sbitset__set (annotation_node->contributions[ci],
                                      predecessor_item);
                    }
                }
            }
            if (annotation_node->contributions[ci])
              {
                Sbitset biter;
                Sbitset__Index i;
                SBITSET__FOR_EACH (annotation_node->contributions[ci],
                                   (*predecessor)->nitems, biter, i)
                  {
                    potential_contribution = true;
                    if (!lookaheads)
                      {
                        size_t j;
                        lookaheads = xnmalloc ((*predecessor)->nitems,
                                               sizeof *lookaheads);
                        for (j = 0; j < (*predecessor)->nitems; ++j)
                          lookaheads[j] = NULL;
                      }
                    if (!lookaheads[i])
                      lookaheads[i] = bitset_create (ntokens, BITSET_FIXED);
                    bitset_set (lookaheads[i], contribution_token);
                  }
              }
          }
      }

      /* If the predecessor has any contributions besides just "always" and
         "never" contributions:
         - If the dominant contribution is split-stable, the annotation could
           not affect merging on this predecessor state or its eventual
           predecessor states.  Moreover, all contributions that affect
           whether the dominant contribution remains dominant must be "always"
           or "never" contributions in order for the dominant contribution to
           be split-stable.  Thus, the dominant contribution computation result
           in eventual successor states will not be affected by lookaheads
           tracked for this predecessor state.  (Also, as in the isocore
           compatibility test, we depend on the fact that isocores with equal
           dominant contributions will have the same dominant contribution when
           merged.  Otherwise, we might have to worry that the presence of a
           potential contribution might somehow be the culprit of that behavior
           and thus need to be tracked regardless of the split stability of the
           dominant contribution.)  Thus, go ahead and discard the annotation
           to save space now plus time during state splitting.
         - Otherwise, record the annotation, and compute any resulting
           annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (ContributionIndex__none
              != AnnotationList__computeDominantContribution (
                   annotation_node, (*predecessor)->nitems, lookaheads, true))
            {
              obstack_free (annotations_obstackp, annotation_node);
              annotation_node = NULL;
            }
          {
            size_t i;
            for (i = 0; i < (*predecessor)->nitems; ++i)
              if (lookaheads[i])
                bitset_free (lookaheads[i]);
            free (lookaheads);
          }
          if (annotation_node)
            {
              if (AnnotationList__insertInto (annotation_node,
                                              &annotation_lists[(*predecessor)
                                                                ->number],
                                              (*predecessor)->nitems))
                {
                  ++annotation_counts[(*predecessor)->number];
                  AnnotationList__computePredecessorAnnotations (
                    annotation_node, *predecessor,
                    follow_kernel_items, always_follows, predecessors,
                    item_lookahead_sets, annotation_lists, annotation_counts,
                    annotations_obstackp);
                }
              else
                obstack_free (annotations_obstackp, annotation_node);
            }
        }
      else
        obstack_free (annotations_obstackp, annotation_node);
    }
}
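
/* The recursion above walks the predecessor graph, which may contain cycles,
   but it terminates: it recurses only when AnnotationList__insertInto
   reports a genuinely new annotation, and each state's list can accumulate
   only finitely many distinct annotations.  */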
void
AnnotationList__compute_from_inadequacies (
  state *s, bitsetv follow_kernel_items, bitsetv always_follows,
  state ***predecessors, bitset **item_lookahead_sets,
  InadequacyList **inadequacy_lists, AnnotationList **annotation_lists,
  AnnotationIndex *annotation_counts,
  ContributionIndex *max_contributionsp,
  struct obstack *annotations_obstackp,
  InadequacyListNodeCount *inadequacy_list_node_count)
{
  bitsetv all_lookaheads;
  bitset shift_tokens;
  bitset conflicted_tokens;
  bitset_iterator biter_conflict;
  bitset_bindex conflicted_token;

  /* Return an empty list if s->lookahead_tokens = NULL.  */
  if (s->consistent)
    return;

  all_lookaheads = bitsetv_create (s->nitems, ntokens, BITSET_FIXED);
  bitsetv_ones (all_lookaheads);
  shift_tokens = AnnotationList__compute_shift_tokens (s->transitions);
  conflicted_tokens =
    AnnotationList__compute_conflicted_tokens (shift_tokens, s->reductions);

  /* Add an inadequacy annotation for each conflicted_token.  */
  BITSET_FOR_EACH (biter_conflict, conflicted_tokens, conflicted_token, 0)
    {
      AnnotationList *annotation_node;
      /* FIXME: Would a BITSET_FRUGAL or BITSET_SPARSE be more efficient?
         Convert it now or inside InadequacyList__new_conflict?  */
      bitset actions = bitset_create (s->reductions->num + 1, BITSET_FIXED);
      ContributionIndex contribution_count = 0;
      bool potential_contribution = false;

      /* Allocate the annotation node.  */
      {
        int rule_i;
        for (rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                           conflicted_token))
            ++contribution_count;
        if (bitset_test (shift_tokens, conflicted_token))
          ++contribution_count;
        annotation_node =
          AnnotationList__alloc_on_obstack (contribution_count,
                                            annotations_obstackp);
      }

      /* Add a contribution for each reduction that has conflicted_token as a
         lookahead.  */
      {
        ContributionIndex ci = 0;
        size_t item_i = 0;
        int rule_i;
        for (rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          {
            rule *the_rule = s->reductions->rules[rule_i];
            if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                             conflicted_token))
              {
                bitset_set (actions, rule_i);
                /* If this reduction is on a kernel item, just add it.  */
                if (!item_number_is_rule_number (the_rule->rhs[0]))
                  {
                    annotation_node->contributions[ci] =
                      Sbitset__new_on_obstack (s->nitems,
                                               annotations_obstackp);
                    /* Catch item_i up to rule_i.  This works because both are
                       sorted on rule number.  */
                    while (!item_number_is_rule_number (
                             ritem[s->items[item_i]])
                           || item_number_as_rule_number (
                                ritem[s->items[item_i]])
                              != the_rule->number)
                      {
                        ++item_i;
                        aver (item_i < s->nitems);
                      }
                    Sbitset__set (annotation_node->contributions[ci], item_i);
                  }
                /* Otherwise, add the kernel items whose lookahead sets
                   contribute the conflicted token to this reduction's
                   lookahead set.  */
                else if (AnnotationList__compute_lhs_contributions (
                           s, the_rule, conflicted_token, follow_kernel_items,
                           always_follows, predecessors, item_lookahead_sets,
                           &annotation_node->contributions[ci],
                           annotations_obstackp))
                  {
                    annotation_node->contributions[ci++] = NULL;
                    continue;
                  }
                /* The lookahead token has to come from somewhere.  */
                aver (!Sbitset__isEmpty (annotation_node->contributions[ci],
                                         s->nitems));
                ++ci;
                potential_contribution = true;
              }
          }
      }

      /* If there are any contributions besides just "always" contributions:
         - If there's also a shift contribution, record it.
         - If the dominant contribution is split-stable, then the annotation
           could not affect merging, so go ahead and discard the annotation and
           the inadequacy to save space now plus time during state splitting.
         - Otherwise, record the annotation and the inadequacy, and compute any
           resulting annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (bitset_test (shift_tokens, conflicted_token))
            {
              bitset_set (actions, s->reductions->num);
              annotation_node->contributions[contribution_count - 1] = NULL;
            }
          {
            InadequacyList *conflict_node =
              InadequacyList__new_conflict (
                s, symbols[conflicted_token], actions,
                inadequacy_list_node_count);
            annotation_node->inadequacyNode = conflict_node;
            if (ContributionIndex__none
                != AnnotationList__computeDominantContribution (
                     annotation_node, s->nitems, all_lookaheads, true))
              {
                obstack_free (annotations_obstackp, annotation_node);
                InadequacyList__delete (conflict_node);
              }
            else
              {
                InadequacyList__prependTo (conflict_node,
                                           &inadequacy_lists[s->number]);
                {
                  bool annotation_inserted =
                    AnnotationList__insertInto (annotation_node,
                                                &annotation_lists[s->number],
                                                s->nitems);
                  aver (annotation_inserted);
                }
                /* This aver makes sure the
                   AnnotationList__computeDominantContribution check above
                   does discard annotations in the simplest case of a S/R
                   conflict with no token precedence.  */
                aver (!bitset_test (shift_tokens, conflicted_token)
                      || symbols[conflicted_token]->prec);
                ++annotation_counts[s->number];
                if (contribution_count > *max_contributionsp)
                  *max_contributionsp = contribution_count;
                AnnotationList__computePredecessorAnnotations (
                  annotation_node, s,
                  follow_kernel_items, always_follows, predecessors,
                  item_lookahead_sets, annotation_lists, annotation_counts,
                  annotations_obstackp);
              }
          }
        }
      else
        {
          bitset_free (actions);
          obstack_free (annotations_obstackp, annotation_node);
        }
    }

  bitsetv_free (all_lookaheads);
  bitset_free (shift_tokens);
  bitset_free (conflicted_tokens);
}
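
/* Ownership note: on the potential-contribution path, the actions bitset is
   handed off to the InadequacyList node (and released with it), so the
   explicit bitset_free above runs only when no annotation is recorded.  */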
void
AnnotationList__debug (AnnotationList const *self, size_t nitems, int spaces)
{
  AnnotationList const *a;
  AnnotationIndex ai;
  for (a = self, ai = 0; a; a = a->next, ++ai)
    {
      {
        int j;
        for (j = 0; j < spaces; ++j)
          putc (' ', stderr);
      }
      fprintf (stderr, "Annotation %d (manifesting state %d):\n",
               ai, a->inadequacyNode->manifestingState->number);
      {
        ContributionIndex ci;
        bitset_bindex rulei = 0; /* init suppresses compiler warning */
        rulei = bitset_first (a->inadequacyNode->inadequacy.conflict.actions);
        for (ci = 0; ci < a->inadequacyNode->contributionCount; ++ci)
          {
            symbol_number token =
              InadequacyList__getContributionToken (a->inadequacyNode, ci)
                ->number;
            {
              int j;
              for (j = 0; j < spaces+2; ++j)
                putc (' ', stderr);
            }
            if (ci == InadequacyList__getShiftContributionIndex (
                        a->inadequacyNode))
              fprintf (stderr, "Contributes shift of token %d.\n", token);
            else
              {
                fprintf (stderr, "Contributes token %d", token);
                aver (rulei != BITSET_BINDEX_MAX);
                fprintf (stderr, " as lookahead, rule number %d",
                         a->inadequacyNode->manifestingState
                           ->reductions->rules[rulei]->number);
                rulei =
                  bitset_next (a->inadequacyNode->inadequacy.conflict.actions,
                               rulei+1);
                if (AnnotationList__isContributionAlways (a, ci))
                  fprintf (stderr, " always.");
                else
                  {
                    fprintf (stderr, ", items: ");
                    Sbitset__fprint (a->contributions[ci], nitems, stderr);
                  }
                fprintf (stderr, "\n");
              }
          }
      }
    }
}
void
AnnotationList__computeLookaheadFilter (AnnotationList const *self,
                                        size_t nitems,
                                        bitsetv lookahead_filter)
{
  bitsetv_zero (lookahead_filter);
  for (; self; self = self->next)
    {
      ContributionIndex ci;
      for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
        if (!AnnotationList__isContributionAlways (self, ci))
          {
            Sbitset biter;
            Sbitset__Index item;
            symbol_number token =
              InadequacyList__getContributionToken (self->inadequacyNode, ci)
                ->number;
            SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
              bitset_set (lookahead_filter[item], token);
          }
    }
}
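
/* A minimal caller sketch (hypothetical, not taken from Bison itself):
   allocate one bitset row per kernel item, then let the state's annotations
   mark which lookaheads are worth tracking during splitting.

     bitsetv lookahead_filter = bitsetv_create (s->nitems, ntokens,
                                                BITSET_FIXED);
     AnnotationList__computeLookaheadFilter (annotation_lists[s->number],
                                             s->nitems, lookahead_filter);
     ...consult the filter while propagating lookaheads...
     bitsetv_free (lookahead_filter);
*/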
/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - \c nitems is the number of kernel items in the LR(0) state that \c self
 *     annotates.
 *   - \c lookaheads describes the lookahead sets on the kernel items of some
 *     isocore of the LR(0) state that \c self annotates.  Either:
 *     - <tt>lookaheads = NULL</tt> only if the lookahead set on every kernel
 *       item is empty.
 *     - For any <tt>0 <= i < nitems</tt>, <tt>lookaheads[i]</tt> is either:
 *       - \c NULL only if the lookahead set on kernel item \c i is empty.
 *       - The (possibly empty) lookahead set on kernel item \c i.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self is made by the state
 *     described by \c lookaheads.
 */
static bool
AnnotationList__stateMakesContribution (AnnotationList const *self,
                                        size_t nitems, ContributionIndex ci,
                                        bitset *lookaheads)
{
  if (AnnotationList__isContributionAlways (self, ci))
    return true;
  if (!lookaheads)
    return false;
  {
    symbol_number token =
      InadequacyList__getContributionToken (self->inadequacyNode, ci)->number;
    Sbitset biter;
    Sbitset__Index item;
    SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
      if (lookaheads[item] && bitset_test (lookaheads[item], token))
        return true;
  }
  return false;
}
ContributionIndex
AnnotationList__computeDominantContribution (AnnotationList const *self,
                                             size_t nitems, bitset *lookaheads,
                                             bool require_split_stable)
{
  symbol *token;
  ContributionIndex const ci_shift =
    InadequacyList__getShiftContributionIndex (self->inadequacyNode);

  token = self->inadequacyNode->inadequacy.conflict.token;

  /* S/R conflict.  */
  if (ci_shift != ContributionIndex__none)
    {
      bool find_stable_domination_over_shift = false;
      bool find_stable_error_action_domination = false;
      {
        ContributionIndex ci;
        int actioni;
        ContributionIndex ci_rr_dominator = ContributionIndex__none;
        int shift_precedence = token->prec;

        /* If the token has no precedence set, shift is always chosen.  */
        if (!shift_precedence)
          return ci_shift;

        /* Figure out which reductions contribute, which of those would
           dominate in a R/R comparison, and whether any reduction dominates
           the shift so that the R/R comparison is actually needed.  */
        for (ci = 0, actioni = bitset_first (self->inadequacyNode->inadequacy
                                             .conflict.actions);
             ci < self->inadequacyNode->contributionCount;
             ++ci, actioni = bitset_next (self->inadequacyNode->inadequacy
                                          .conflict.actions, actioni+1))
          {
            int reduce_precedence = 0;
            if (ci == ci_shift)
              continue;
            {
              rule *r = self->inadequacyNode->manifestingState
                          ->reductions->rules[actioni];
              if (r->prec)
                reduce_precedence = r->prec->prec;
            }
            /* If there's no need to check whether this reduction actually
               contributes because the shift eliminates it from the R/R
               comparison anyway, continue to the next reduction.  */
            if (reduce_precedence
                && (reduce_precedence < shift_precedence
                    || (reduce_precedence == shift_precedence
                        && token->assoc == right_assoc)))
              continue;
            if (!AnnotationList__stateMakesContribution (self, nitems, ci,
                                                         lookaheads))
              continue;
            /* This uneliminated reduction contributes, so see if it can cause
               an error action.  */
            if (reduce_precedence == shift_precedence
                && token->assoc == non_assoc)
              {
                /* It's not possible to find split-stable domination over
                   shift after a potential %nonassoc.  */
                if (find_stable_domination_over_shift)
                  return ContributionIndex__none;
                if (!require_split_stable
                    || AnnotationList__isContributionAlways (self, ci))
                  return ContributionIndex__error_action;
                find_stable_error_action_domination = true;
              }
            /* Consider this uneliminated contributing reduction in the R/R
               comparison.  */
            if (ci_rr_dominator == ContributionIndex__none)
              ci_rr_dominator = ci;
            /* If precedence is set for this uneliminated contributing
               reduction, it dominates the shift, so try to figure out which
               reduction dominates the R/R comparison.  */
            if (reduce_precedence)
              {
                /* It's not possible to find split-stable error action
                   domination after a potential reduction.  */
                if (find_stable_error_action_domination)
                  return ContributionIndex__none;
                if (!require_split_stable)
                  return ci_rr_dominator;
                if (!AnnotationList__isContributionAlways (self,
                                                           ci_rr_dominator))
                  return ContributionIndex__none;
                if (AnnotationList__isContributionAlways (self, ci))
                  return ci_rr_dominator;
                find_stable_domination_over_shift = true;
              }
          }
      }
      if (find_stable_domination_over_shift
          || find_stable_error_action_domination)
        return ContributionIndex__none;
      /* No reduce or error action domination found, so shift dominates.  */
      return ci_shift;
    }

  /* R/R conflict, so the reduction with the lowest rule number dominates.
     Fortunately, contributions are sorted by rule number.  */
  {
    ContributionIndex ci;
    for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
      if (AnnotationList__stateMakesContribution (self, nitems, ci,
                                                  lookaheads))
        {
          if (require_split_stable
              && !AnnotationList__isContributionAlways (self, ci))
            return ContributionIndex__none;
          return ci;
        }
  }
  return ContributionIndex__none;
}
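
/* Summary of AnnotationList__computeDominantContribution's return values:
   ci_shift when the shift wins, an R/R dominator index when a reduction
   wins, ContributionIndex__error_action for a %nonassoc-induced error
   action, and ContributionIndex__none when no winner can be determined
   (split-stably, if require_split_stable).  */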