/* IELR's inadequacy annotation list.

   Copyright (C) 2009-2010 Free Software Foundation, Inc.

   This file is part of Bison, the GNU Compiler Compiler.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "AnnotationList.h"
/**
 * \pre
 *   - <tt>annotations_obstackp != NULL</tt>.
 * \post
 *   - \c result is a new \c AnnotationList with one node whose:
 *     - \c inadequacyNode member is \c NULL.
 *     - \c contributions member is allocated with \c contribution_count
 *       uninitialized elements.
 *   - All memory was allocated on \c annotations_obstackp.
 */
static AnnotationList*
AnnotationList__alloc_on_obstack (ContributionIndex contribution_count,
                                  struct obstack *annotations_obstackp)
{
  AnnotationList *result;
  size_t contributions_size =
    contribution_count * sizeof result->contributions[0];
  result = obstack_alloc (annotations_obstackp,
                          offsetof (AnnotationList, contributions)
                          + contributions_size);
  result->next = NULL;
  result->inadequacyNode = NULL;
  return result;
}
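
/* Illustrative sketch, not from the original Bison sources: the offsetof
   sizing idiom above, shown with plain malloc instead of an obstack so it
   stands alone.  A node is sized as the header up to its trailing array
   plus space for the array elements.  All names here are hypothetical,
   and the block is kept under #if 0 so it does not affect compilation.  */
#if 0
#include <stddef.h>
#include <stdlib.h>

struct demo_node
{
  struct demo_node *next;
  int payload[1]; /* trailing variable-length array, like contributions */
};

static struct demo_node *
demo_node__alloc (size_t payload_count)
{
  /* Header bytes up to the tail member, plus the tail elements, exactly
     as AnnotationList__alloc_on_obstack computes its size.  */
  struct demo_node *result =
    malloc (offsetof (struct demo_node, payload)
            + payload_count * sizeof result->payload[0]);
  if (result)
    result->next = NULL;
  return result;
}
#endif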

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self represents an
 *     "always" contribution.
 */
static bool
AnnotationList__isContributionAlways (AnnotationList const *self,
                                      ContributionIndex ci)
{
  aver (0 <= ci && ci < self->inadequacyNode->contributionCount);
  return self->contributions[ci] == NULL;
}

/**
 * \pre
 *   - \c self is a single node.
 *   - \c self annotates the same state as every other node in \c list, and
 *     that state has \c nitems kernel items.
 * \post
 *   - If the list \c list already contains an identical annotation to \c self,
 *     \c self was discarded, \c result is false, and the caller is responsible
 *     for the memory of \c self.
 *   - Otherwise, \c list now contains the node \c self, \c result is true, and
 *     \c list assumes responsibility for the memory of \c self.
 *   - The sort in \c list is:
 *     - Sort in reverse order on the unique ID of the associated
 *       inadequacy node.  Because these IDs are assigned in ascending
 *       order, this should mean that the insertion position within an
 *       annotation list is usually near the beginning with other
 *       annotations associated with the same inadequacy.
 *     - Next, sort on the first contribution that is different as follows:
 *       - Sort an always-contribution before a never-contribution before a
 *         potential-contribution.
 *       - Two always-contributions are identical.
 *       - Two never-contributions are identical.
 *       - For two potential-contributions, sort on the contributions' kernel
 *         item bitsets interpreted as binary numbers.
 *   - The sorting has a few effects:
 *     - It accelerates elimination of identical annotations during insertion.
 *     - It determines how the output of \c AnnotationList__debug is sorted.
 *     - Other than that, it's probably not important.
 */
static bool
AnnotationList__insertInto (AnnotationList *self, AnnotationList **list,
                            size_t nitems)
{
  AnnotationList **node;
  for (node = list; *node; node = &(*node)->next)
    {
      int cmp = 0;
      ContributionIndex ci;
      if (self->inadequacyNode->id < (*node)->inadequacyNode->id)
        cmp = 1;
      else if ((*node)->inadequacyNode->id < self->inadequacyNode->id)
        cmp = -1;
      else
        for (ci = 0;
             cmp == 0 && ci < self->inadequacyNode->contributionCount;
             ++ci)
          {
            if (AnnotationList__isContributionAlways (self, ci))
              {
                if (!AnnotationList__isContributionAlways (*node, ci))
                  cmp = -1;
              }
            else if (AnnotationList__isContributionAlways (*node, ci))
              cmp = 1;
            else
              {
                size_t item;
                for (item = 0; cmp == 0 && item < nitems; ++item)
                  {
                    if (!Sbitset__test (self->contributions[ci], item))
                      {
                        if (Sbitset__test ((*node)->contributions[ci], item))
                          cmp = -1;
                      }
                    else if (!Sbitset__test ((*node)->contributions[ci],
                                             item))
                      cmp = 1;
                  }
              }
          }
      if (cmp < 0)
        {
          self->next = *node;
          *node = self;
          break;
        }
      if (cmp == 0)
        {
          self = NULL;
          break;
        }
    }
  if (!*node)
    {
      self->next = NULL;
      *node = self;
    }
  return self != NULL;
}
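
/* Illustrative sketch, not from the original Bison sources: the
   pointer-to-pointer insertion walk used by AnnotationList__insertInto
   above, reduced to a list of ints kept in descending order (mirroring
   the reverse sort on inadequacy IDs) with duplicates discarded.  All
   names here are hypothetical.  */
#if 0
#include <stdbool.h>

struct demo_list
{
  int id;
  struct demo_list *next;
};

/* Insert NODE into *LIST; return true iff it was inserted.  */
static bool
demo_list__insert (struct demo_list *node, struct demo_list **list)
{
  struct demo_list **p;
  for (p = list; *p; p = &(*p)->next)
    {
      if ((*p)->id == node->id)
        return false;           /* identical entry: caller keeps NODE */
      if ((*p)->id < node->id)
        break;                  /* descending order: NODE goes before *p */
    }
  node->next = *p;              /* also handles appending at the tail */
  *p = node;
  return true;
}
#endif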

static bitset
AnnotationList__compute_shift_tokens (transitions *trans)
{
  bitset shift_tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;
  FOR_EACH_SHIFT (trans, i)
    bitset_set (shift_tokens, TRANSITION_SYMBOL (trans, i));
  return shift_tokens;
}

static bitset
AnnotationList__compute_conflicted_tokens (bitset shift_tokens,
                                           reductions *reds)
{
  bitset conflicted_tokens = bitset_create (ntokens, BITSET_FIXED);
  bitset conflicted_tokens_rule = bitset_create (ntokens, BITSET_FIXED);
  bitset tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;

  bitset_copy (tokens, shift_tokens);
  for (i = 0; i < reds->num; ++i)
    {
      bitset_and (conflicted_tokens_rule, tokens, reds->lookahead_tokens[i]);
      bitset_or (conflicted_tokens,
                 conflicted_tokens, conflicted_tokens_rule);
      bitset_or (tokens, tokens, reds->lookahead_tokens[i]);
      /* Check that rules are sorted on rule number or the next step in
         AnnotationList__compute_from_inadequacies will misbehave.  */
      aver (i == 0 || reds->rules[i-1] < reds->rules[i]);
    }

  bitset_free (tokens);
  bitset_free (conflicted_tokens_rule);

  return conflicted_tokens;
}
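
/* Illustrative sketch, not from the original Bison sources: the conflict
   detection above, reduced to single machine-word bitsets.  A token is
   conflicted when two actions claim it: it is already in the shift set or
   an earlier reduction's lookahead set, and it appears again in the
   current reduction's lookahead set.  All names here are hypothetical.  */
#if 0
static unsigned int
demo_conflicted_tokens (unsigned int shift_tokens,
                        unsigned int const *lookaheads, int nreds)
{
  unsigned int tokens = shift_tokens; /* tokens claimed so far */
  unsigned int conflicted = 0;
  int i;
  for (i = 0; i < nreds; ++i)
    {
      conflicted |= tokens & lookaheads[i]; /* claimed twice => conflict */
      tokens |= lookaheads[i];
    }
  return conflicted;
}
#endif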

static bool
AnnotationList__compute_lhs_contributions (state *s, rule *the_rule,
                                           symbol_number conflicted_token,
                                           bitsetv follow_kernel_items,
                                           bitsetv always_follows,
                                           state ***predecessors,
                                           bitset **item_lookahead_sets,
                                           Sbitset *items,
                                           struct obstack
                                             *annotations_obstackp)
{
  goto_number lhs_goto = map_goto (s->number, the_rule->lhs->number);
  if (bitset_test (always_follows[lhs_goto], conflicted_token))
    return true;
  *items = Sbitset__new_on_obstack (s->nitems, annotations_obstackp);
  {
    bitset_iterator biter_item;
    bitset_bindex item;
    BITSET_FOR_EACH (biter_item, follow_kernel_items[lhs_goto], item, 0)
      if (ielr_item_has_lookahead (s, 0, item, conflicted_token,
                                   predecessors, item_lookahead_sets))
        Sbitset__set (*items, item);
  }
  return false;
}

static void
AnnotationList__computePredecessorAnnotations (AnnotationList *self, state *s,
                                               bitsetv follow_kernel_items,
                                               bitsetv always_follows,
                                               state ***predecessors,
                                               bitset **item_lookahead_sets,
                                               AnnotationList
                                                 **annotation_lists,
                                               AnnotationIndex
                                                 *annotation_counts,
                                               struct obstack
                                                 *annotations_obstackp)
{
  state **predecessor;
  for (predecessor = predecessors[s->number]; *predecessor; ++predecessor)
    {
      AnnotationList *annotation_node =
        AnnotationList__alloc_on_obstack (
          self->inadequacyNode->contributionCount, annotations_obstackp);
      annotation_node->inadequacyNode = self->inadequacyNode;
      bool potential_contribution = false;
      bitset *lookaheads = NULL;
      {
        ContributionIndex ci;
        for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
          {
            symbol_number contribution_token =
              InadequacyList__getContributionToken (self->inadequacyNode, ci)
                ->number;
            if (AnnotationList__isContributionAlways (self, ci))
              {
                annotation_node->contributions[ci] = NULL;
                continue;
              }
            annotation_node->contributions[ci] =
              Sbitset__new_on_obstack ((*predecessor)->nitems,
                                       annotations_obstackp);
            {
              size_t predecessor_item = 0;
              Sbitset sbiter_item;
              Sbitset__Index self_item;
              SBITSET__FOR_EACH (self->contributions[ci], s->nitems,
                                 sbiter_item, self_item)
                {
                  /* If this kernel item is the beginning of a RHS, it must be
                     the kernel item in the start state, and so it has an empty
                     lookahead set.  Thus, it can't contribute to inadequacies,
                     and so it should never have been identified as a
                     contribution.  If, instead, this kernel item is the
                     successor of the start state's kernel item, the lookahead
                     set is still empty, and so it also should never have been
                     identified as a contribution.  This situation is fortunate
                     because we want to avoid the - 2 below in both cases.  */
                  aver (s->items[self_item] > 1);
                  /* If this kernel item is next to the beginning of the RHS,
                     then check all of the predecessor's goto follows for the
                     LHS.  */
                  if (item_number_is_rule_number (ritem[s->items[self_item]
                                                        - 2]))
                    {
                      unsigned int rulei;
                      Sbitset items;
                      for (rulei = s->items[self_item];
                           !item_number_is_rule_number (ritem[rulei]);
                           ++rulei)
                        ;
                      if (AnnotationList__compute_lhs_contributions (
                            *predecessor,
                            &rules[item_number_as_rule_number (ritem[rulei])],
                            contribution_token,
                            follow_kernel_items, always_follows, predecessors,
                            item_lookahead_sets, &items, annotations_obstackp))
                        {
                          obstack_free (annotations_obstackp,
                                        annotation_node->contributions[ci]);
                          annotation_node->contributions[ci] = NULL;
                          break;
                        }
                      else
                        {
                          Sbitset__or (annotation_node->contributions[ci],
                                       annotation_node->contributions[ci],
                                       items, (*predecessor)->nitems);
                          obstack_free (annotations_obstackp, items);
                        }
                    }
                  /* If this kernel item is later in the RHS, then check the
                     predecessor item's lookahead set.  */
                  else
                    {
                      /* We don't have to start the predecessor item search at
                         the beginning every time because items from both
                         states are sorted by their indices in ritem.  */
                      for (;
                           predecessor_item < (*predecessor)->nitems;
                           ++predecessor_item)
                        if ((*predecessor)->items[predecessor_item]
                            == s->items[self_item] - 1)
                          break;
                      aver (predecessor_item != (*predecessor)->nitems);
                      if (ielr_item_has_lookahead (*predecessor, 0,
                                                   predecessor_item,
                                                   contribution_token,
                                                   predecessors,
                                                   item_lookahead_sets))
                        Sbitset__set (annotation_node->contributions[ci],
                                      predecessor_item);
                    }
                }
            }
            if (annotation_node->contributions[ci])
              {
                Sbitset biter;
                Sbitset__Index i;
                SBITSET__FOR_EACH (annotation_node->contributions[ci],
                                   (*predecessor)->nitems, biter, i)
                  {
                    potential_contribution = true;
                    if (!lookaheads)
                      {
                        size_t j;
                        lookaheads = xnmalloc ((*predecessor)->nitems,
                                               sizeof *lookaheads);
                        for (j = 0; j < (*predecessor)->nitems; ++j)
                          lookaheads[j] = NULL;
                      }
                    if (!lookaheads[i])
                      lookaheads[i] = bitset_create (ntokens, BITSET_FIXED);
                    bitset_set (lookaheads[i], contribution_token);
                  }
              }
          }
      }

      /* If the predecessor has any contributions besides just "always" and
         "never" contributions:
         - If the dominant contribution is split-stable, the annotation could
           not affect merging on this predecessor state or its eventual
           predecessor states.  Moreover, all contributions that affect
           whether the dominant contribution remains dominant must be "always"
           or "never" contributions in order for the dominant contribution to
           be split-stable.  Thus, the dominant contribution computation result
           in eventual successor states will not be affected by lookaheads
           tracked for this predecessor state.  (Also, as in the isocore
           compatibility test, we depend on the fact that isocores with equal
           dominant contributions will have the same dominant contribution when
           merged.  Otherwise, we might have to worry that the presence of a
           potential contribution might somehow be the culprit of that behavior
           and thus need to be tracked regardless of the split stability of the
           dominant contribution.)  Thus, go ahead and discard the annotation
           to save space now plus time during state splitting.
         - Otherwise, record the annotation, and compute any resulting
           annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (ContributionIndex__none
              != AnnotationList__computeDominantContribution (
                   annotation_node, (*predecessor)->nitems, lookaheads, true))
            {
              obstack_free (annotations_obstackp, annotation_node);
              annotation_node = NULL;
            }
          {
            size_t i;
            for (i = 0; i < (*predecessor)->nitems; ++i)
              if (lookaheads[i])
                bitset_free (lookaheads[i]);
            free (lookaheads);
          }
          if (annotation_node)
            {
              if (AnnotationList__insertInto (annotation_node,
                                              &annotation_lists[(*predecessor)
                                                                ->number],
                                              (*predecessor)->nitems))
                {
                  ++annotation_counts[(*predecessor)->number];
                  AnnotationList__computePredecessorAnnotations (
                    annotation_node, *predecessor,
                    follow_kernel_items, always_follows, predecessors,
                    item_lookahead_sets, annotation_lists, annotation_counts,
                    annotations_obstackp);
                }
              else
                obstack_free (annotations_obstackp, annotation_node);
            }
        }
      else
        obstack_free (annotations_obstackp, annotation_node);
    }
}

void
AnnotationList__compute_from_inadequacies (
  state *s, bitsetv follow_kernel_items, bitsetv always_follows,
  state ***predecessors, bitset **item_lookahead_sets,
  InadequacyList **inadequacy_lists, AnnotationList **annotation_lists,
  AnnotationIndex *annotation_counts,
  ContributionIndex *max_contributionsp,
  struct obstack *annotations_obstackp,
  InadequacyListNodeCount *inadequacy_list_node_count)
{
  bitsetv all_lookaheads;
  bitset shift_tokens;
  bitset conflicted_tokens;
  bitset_iterator biter_conflict;
  bitset_bindex conflicted_token;

  /* Return an empty list if s->lookahead_tokens = NULL.  */
  if (s->consistent)
    return;

  all_lookaheads = bitsetv_create (s->nitems, ntokens, BITSET_FIXED);
  bitsetv_ones (all_lookaheads);
  shift_tokens = AnnotationList__compute_shift_tokens (s->transitions);
  conflicted_tokens =
    AnnotationList__compute_conflicted_tokens (shift_tokens, s->reductions);

  /* Add an inadequacy annotation for each conflicted_token.  */
  BITSET_FOR_EACH (biter_conflict, conflicted_tokens, conflicted_token, 0)
    {
      AnnotationList *annotation_node;
      /* FIXME: Would a BITSET_FRUGAL or BITSET_SPARSE be more efficient?  Now
         or convert it inside InadequacyList__new_conflict?  */
      bitset actions = bitset_create (s->reductions->num + 1, BITSET_FIXED);
      ContributionIndex contribution_count = 0;
      bool potential_contribution = false;

      /* Allocate the annotation node.  */
      {
        int rule_i;
        for (rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                           conflicted_token))
            ++contribution_count;
        if (bitset_test (shift_tokens, conflicted_token))
          ++contribution_count;
        annotation_node =
          AnnotationList__alloc_on_obstack (contribution_count,
                                            annotations_obstackp);
      }

      /* Add a contribution for each reduction that has conflicted_token as a
         lookahead.  */
      {
        ContributionIndex ci = 0;
        int item_i = 0;
        int rule_i;
        for (rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          {
            rule *the_rule = s->reductions->rules[rule_i];
            if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                             conflicted_token))
              {
                bitset_set (actions, rule_i);
                /* If this reduction is on a kernel item, just add it.  */
                if (!item_number_is_rule_number (the_rule->rhs[0]))
                  {
                    annotation_node->contributions[ci] =
                      Sbitset__new_on_obstack (s->nitems,
                                               annotations_obstackp);
                    /* Catch item_i up to rule_i.  This works because both are
                       sorted on rule number.  */
                    while (!item_number_is_rule_number (
                             ritem[s->items[item_i]])
                           || item_number_as_rule_number (
                                ritem[s->items[item_i]])
                              != the_rule->number)
                      {
                        ++item_i;
                        aver (item_i < s->nitems);
                      }
                    Sbitset__set (annotation_node->contributions[ci], item_i);
                  }
                /* Otherwise, add the kernel items whose lookahead sets
                   contribute the conflicted token to this reduction's
                   lookahead set.  */
                else if (AnnotationList__compute_lhs_contributions (
                           s, the_rule, conflicted_token, follow_kernel_items,
                           always_follows, predecessors, item_lookahead_sets,
                           &annotation_node->contributions[ci],
                           annotations_obstackp))
                  {
                    annotation_node->contributions[ci++] = NULL;
                    continue;
                  }
                /* The lookahead token has to come from somewhere.  */
                aver (!Sbitset__isEmpty (annotation_node->contributions[ci],
                                         s->nitems));
                ++ci;
                potential_contribution = true;
              }
          }
      }

      /* If there are any contributions besides just "always" contributions:
         - If there's also a shift contribution, record it.
         - If the dominant contribution is split-stable, then the annotation
           could not affect merging, so go ahead and discard the annotation and
           the inadequacy to save space now plus time during state splitting.
         - Otherwise, record the annotation and the inadequacy, and compute any
           resulting annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (bitset_test (shift_tokens, conflicted_token))
            {
              bitset_set (actions, s->reductions->num);
              annotation_node->contributions[contribution_count - 1] = NULL;
            }
          {
            InadequacyList *conflict_node =
              InadequacyList__new_conflict (
                s, symbols[conflicted_token], actions,
                inadequacy_list_node_count);
            annotation_node->inadequacyNode = conflict_node;
            if (ContributionIndex__none
                != AnnotationList__computeDominantContribution (
                     annotation_node, s->nitems, all_lookaheads, true))
              {
                obstack_free (annotations_obstackp, annotation_node);
                InadequacyList__delete (conflict_node);
              }
            else
              {
                InadequacyList__prependTo (conflict_node,
                                           &inadequacy_lists[s->number]);
                aver (AnnotationList__insertInto (
                        annotation_node, &annotation_lists[s->number],
                        s->nitems));
                /* This aver makes sure the
                   AnnotationList__computeDominantContribution check above
                   does discard annotations in the simplest case of a S/R
                   conflict with no token precedence.  */
                aver (!bitset_test (shift_tokens, conflicted_token)
                      || symbols[conflicted_token]->prec);
                ++annotation_counts[s->number];
                if (contribution_count > *max_contributionsp)
                  *max_contributionsp = contribution_count;
                AnnotationList__computePredecessorAnnotations (
                  annotation_node, s,
                  follow_kernel_items, always_follows, predecessors,
                  item_lookahead_sets, annotation_lists, annotation_counts,
                  annotations_obstackp);
              }
          }
        }
      else
        {
          bitset_free (actions);
          obstack_free (annotations_obstackp, annotation_node);
        }
    }

  bitsetv_free (all_lookaheads);
  bitset_free (shift_tokens);
  bitset_free (conflicted_tokens);
}
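
/* Illustrative sketch, not from the original Bison sources: the "catch
   item_i up to rule_i" advance used above.  Because kernel items and
   reductions are both sorted on rule number (the aver in
   AnnotationList__compute_conflicted_tokens guarantees the latter), one
   cursor can be advanced monotonically across all reductions instead of
   rescanning from the start for each one.  All names are hypothetical.  */
#if 0
#include <stdlib.h>

/* Return the index in ITEM_RULES (NITEMS entries, sorted ascending) whose
   value is RULE_NUMBER, resuming the search at *ITEM_I so that a later,
   larger query continues from here.  */
static int
demo_catch_up (int const *item_rules, int nitems, int *item_i,
               int rule_number)
{
  while (item_rules[*item_i] != rule_number)
    {
      ++*item_i;
      if (*item_i >= nitems) /* mirrors the aver in the loop above */
        abort ();
    }
  return *item_i;
}
#endif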

void
AnnotationList__debug (AnnotationList const *self, size_t nitems, int spaces)
{
  AnnotationList const *a;
  AnnotationIndex ai;
  for (a = self, ai = 0; a; a = a->next, ++ai)
    {
      {
        int j;
        for (j = 0; j < spaces; ++j)
          putc (' ', stderr);
      }
      fprintf (stderr, "Annotation %d (manifesting state %d):\n",
               ai, a->inadequacyNode->manifestingState->number);
      {
        ContributionIndex ci;
        bitset_bindex rulei = 0; /* init suppresses compiler warning */
        rulei = bitset_first (a->inadequacyNode->inadequacy.conflict.actions);
        for (ci = 0; ci < a->inadequacyNode->contributionCount; ++ci)
          {
            symbol_number token =
              InadequacyList__getContributionToken (a->inadequacyNode, ci)
                ->number;
            {
              int j;
              for (j = 0; j < spaces+2; ++j)
                putc (' ', stderr);
            }
            if (ci == InadequacyList__getShiftContributionIndex (
                        a->inadequacyNode))
              fprintf (stderr, "Contributes shift of token %d.\n", token);
            else
              {
                fprintf (stderr, "Contributes token %d", token);
                aver (rulei != BITSET_BINDEX_MAX);
                fprintf (stderr, " as lookahead, rule number %d",
                         a->inadequacyNode->manifestingState
                           ->reductions->rules[rulei]->number);
                rulei =
                  bitset_next (a->inadequacyNode->inadequacy.conflict.actions,
                               rulei+1);
                if (AnnotationList__isContributionAlways (a, ci))
                  fprintf (stderr, " always.");
                else
                  {
                    fprintf (stderr, ", items: ");
                    Sbitset__fprint (a->contributions[ci], nitems, stderr);
                  }
                fprintf (stderr, "\n");
              }
          }
      }
    }
}

void
AnnotationList__computeLookaheadFilter (AnnotationList const *self,
                                        size_t nitems,
                                        bitsetv lookahead_filter)
{
  bitsetv_zero (lookahead_filter);
  for (; self; self = self->next)
    {
      ContributionIndex ci;
      for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
        if (!AnnotationList__isContributionAlways (self, ci))
          {
            Sbitset biter;
            Sbitset__Index item;
            symbol_number token =
              InadequacyList__getContributionToken (self->inadequacyNode, ci)
                ->number;
            SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
              bitset_set (lookahead_filter[item], token);
          }
    }
}

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - \c nitems is the number of kernel items in the LR(0) state that \c self
 *     annotates.
 *   - \c lookaheads describes the lookahead sets on the kernel items of some
 *     isocore of the LR(0) state that \c self annotates.  Either:
 *     - <tt>lookaheads = NULL</tt> only if the lookahead set on every kernel
 *       item is empty.
 *     - For any <tt>0 <= i < nitems</tt>, <tt>lookaheads[i]</tt> is either:
 *       - \c NULL only if the lookahead set on kernel item \c i is empty.
 *       - The (possibly empty) lookahead set on kernel item \c i.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self is made by the state
 *     described by \c lookaheads.
 */
static bool
AnnotationList__stateMakesContribution (AnnotationList const *self,
                                        size_t nitems, ContributionIndex ci,
                                        bitset *lookaheads)
{
  if (AnnotationList__isContributionAlways (self, ci))
    return true;
  if (!lookaheads)
    return false;
  {
    symbol_number token =
      InadequacyList__getContributionToken (self->inadequacyNode, ci)->number;
    Sbitset biter;
    Sbitset__Index item;
    SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
      if (lookaheads[item] && bitset_test (lookaheads[item], token))
        return true;
  }
  return false;
}

ContributionIndex
AnnotationList__computeDominantContribution (AnnotationList const *self,
                                             size_t nitems, bitset *lookaheads,
                                             bool require_split_stable)
{
  symbol *token;
  ContributionIndex const ci_shift =
    InadequacyList__getShiftContributionIndex (self->inadequacyNode);

  token = self->inadequacyNode->inadequacy.conflict.token;

  /* S/R conflict.  */
  if (ci_shift != ContributionIndex__none)
    {
      bool find_stable_domination_over_shift = false;
      bool find_stable_error_action_domination = false;
      {
        ContributionIndex ci;
        bitset_bindex actioni;
        ContributionIndex ci_rr_dominator = ContributionIndex__none;
        int shift_precedence = token->prec;

        /* If the token has no precedence set, shift is always chosen.  */
        if (!shift_precedence)
          return ci_shift;

        /* Figure out which reductions contribute, which of those would
           dominate in a R/R comparison, and whether any reduction dominates
           the shift so that the R/R comparison is actually needed.  */
        for (ci = 0, actioni = bitset_first (self->inadequacyNode->inadequacy
                                             .conflict.actions);
             ci < self->inadequacyNode->contributionCount;
             ++ci, actioni = bitset_next (self->inadequacyNode->inadequacy
                                          .conflict.actions, actioni+1))
          {
            int reduce_precedence = 0;
            if (ci == ci_shift)
              continue;
            {
              rule *r = self->inadequacyNode->manifestingState
                          ->reductions->rules[actioni];
              if (r->prec)
                reduce_precedence = r->prec->prec;
            }
            /* If there's no need to check whether this reduction actually
               contributes because the shift eliminates it from the R/R
               comparison anyway, continue to the next reduction.  */
            if (reduce_precedence
                && (reduce_precedence < shift_precedence
                    || (reduce_precedence == shift_precedence
                        && token->assoc == right_assoc)))
              continue;
            if (!AnnotationList__stateMakesContribution (self, nitems, ci,
                                                         lookaheads))
              continue;
            /* This uneliminated reduction contributes, so see if it can cause
               an error action.  */
            if (reduce_precedence == shift_precedence
                && token->assoc == non_assoc)
              {
                /* It's not possible to find split-stable domination over
                   shift after a potential %nonassoc.  */
                if (find_stable_domination_over_shift)
                  return ContributionIndex__none;
                if (!require_split_stable
                    || AnnotationList__isContributionAlways (self, ci))
                  return ContributionIndex__error_action;
                find_stable_error_action_domination = true;
              }
            /* Consider this uneliminated contributing reduction in the R/R
               comparison.  */
            if (ci_rr_dominator == ContributionIndex__none)
              ci_rr_dominator = ci;
            /* If precedence is set for this uneliminated contributing
               reduction, it dominates the shift, so try to figure out which
               reduction dominates the R/R comparison.  */
            if (reduce_precedence)
              {
                /* It's not possible to find split-stable error action
                   domination after a potential reduction.  */
                if (find_stable_error_action_domination)
                  return ContributionIndex__none;
                if (!require_split_stable)
                  return ci_rr_dominator;
                if (!AnnotationList__isContributionAlways (self,
                                                           ci_rr_dominator))
                  return ContributionIndex__none;
                if (AnnotationList__isContributionAlways (self, ci))
                  return ci_rr_dominator;
                find_stable_domination_over_shift = true;
              }
          }
      }
      if (find_stable_domination_over_shift
          || find_stable_error_action_domination)
        return ContributionIndex__none;
      /* No reduce or error action domination found, so shift dominates.  */
      return ci_shift;
    }

  /* R/R conflict, so the reduction with the lowest rule number dominates.
     Fortunately, contributions are sorted by rule number.  */
  {
    ContributionIndex ci;
    for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
      if (AnnotationList__stateMakesContribution (self, nitems, ci,
                                                  lookaheads))
        {
          if (require_split_stable
              && !AnnotationList__isContributionAlways (self, ci))
            return ContributionIndex__none;
          return ci;
        }
  }
  return ContributionIndex__none;
}
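
/* Illustrative sketch, not from the original Bison sources: the
   precedence/associativity comparison that the S/R branch of
   AnnotationList__computeDominantContribution applies before any R/R
   comparison.  Returns +1 for shift, -1 for reduce, 0 for the %nonassoc
   error action.  The names are hypothetical, but the rules match the code
   above: with no precedence relation the conflict stays unresolved and
   shift is chosen; otherwise the higher precedence wins, and on a tie
   left associativity reduces, right associativity shifts, and
   non-associativity yields an error action.  */
#if 0
enum demo_assoc { demo_left_assoc, demo_right_assoc, demo_non_assoc };

static int
demo_resolve_shift_reduce (int shift_precedence, enum demo_assoc token_assoc,
                           int reduce_precedence)
{
  if (!shift_precedence || !reduce_precedence)
    return 1; /* unresolved: Bison defaults to shift */
  if (reduce_precedence < shift_precedence)
    return 1;
  if (shift_precedence < reduce_precedence)
    return -1;
  switch (token_assoc)
    {
    case demo_left_assoc:
      return -1;
    case demo_right_assoc:
      return 1;
    default: /* demo_non_assoc */
      return 0;
    }
}
#endif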