/* IELR's inadequacy annotation list.

   Copyright (C) 2009 Free Software Foundation, Inc.

   This file is part of Bison, the GNU Compiler Compiler.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <config.h>
#include "system.h"

#include "AnnotationList.h"
#include "lalr.h"
#include "ielr.h"

/**
 * \pre
 *   - <tt>annotations_obstackp != NULL</tt>.
 * \post
 *   - \c result is a new \c AnnotationList with one node whose:
 *     - \c inadequacyNode member is \c NULL.
 *     - \c contributions member is allocated with \c contribution_count
 *       uninitialized elements.
 *   - All memory was allocated on \c annotations_obstackp.
 */
static AnnotationList*
AnnotationList__alloc_on_obstack (ContributionIndex contribution_count,
                                  struct obstack *annotations_obstackp)
{
  AnnotationList *result;
  size_t contributions_size =
    contribution_count * sizeof result->contributions[0];
  result = obstack_alloc (annotations_obstackp,
                          offsetof (AnnotationList, contributions)
                          + contributions_size);
  result->next = NULL;
  result->inadequacyNode = NULL;
  return result;
}
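
/* Illustrative sketch (not part of the build): because contributions is
   the trailing array member of AnnotationList, one obstack_alloc covers
   the node header plus the array.  A node with three contributions
   occupies
     offsetof (AnnotationList, contributions) + 3 * sizeof (Sbitset)
   bytes, so after
     AnnotationList *a = AnnotationList__alloc_on_obstack (3, obstackp);
   the caller must still initialize a->contributions[0] through
   a->contributions[2].  */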

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self represents an
 *     "always" contribution.
 */
static bool
AnnotationList__isContributionAlways (AnnotationList const *self,
                                      ContributionIndex ci)
{
  aver (0 <= ci && ci < self->inadequacyNode->contributionCount);
  return self->contributions[ci] == NULL;
}
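
/* Note on the encoding: an "always" contribution is represented by a NULL
   Sbitset in contributions[ci], so no kernel item set is stored for it.
   AnnotationList__alloc_on_obstack leaves the array uninitialized, so
   callers are expected to store either NULL or a valid Sbitset in every
   slot before this predicate is used.  */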

/**
 * \pre
 *   - \c self is a single node.
 *   - \c self annotates the same state as every other node in \c list, and
 *     that state has \c nitems kernel items.
 * \post
 *   - If the list \c list already contains an identical annotation to
 *     \c self, \c self was discarded, \c result is false, and the caller is
 *     responsible for the memory of \c self.
 *   - Otherwise, \c list now contains the node \c self, \c result is true,
 *     and \c list assumes responsibility for the memory of \c self.
 *   - The sort in \c list is:
 *     - Sort in reverse order on the memory address of the associated
 *       inadequacy node.  Because memory is usually allocated in ascending
 *       order (FIXME: Is this true enough?  Should we keep some sort of
 *       global index to guarantee it?), this should mean that the insertion
 *       position within an annotation list is usually near the beginning,
 *       with other annotations associated with the same inadequacy.
 *     - Next, sort on the first contribution that differs, as follows
 *       (illustrated after the function body below):
 *       - Sort an always-contribution before a never-contribution before a
 *         potential-contribution.
 *       - Two always-contributions are identical.
 *       - Two never-contributions are identical.
 *       - For two potential-contributions, sort on the contributions'
 *         kernel item bitsets interpreted as binary numbers.
 *     - The sorting has a few effects:
 *       - It accelerates elimination of identical annotations during
 *         insertion.
 *       - It determines how the output of \c AnnotationList__debug is
 *         sorted.
 *       - Other than that, it's probably not important.
 */
static bool
AnnotationList__insertInto (AnnotationList *self, AnnotationList **list,
                            size_t nitems)
{
  AnnotationList **node;
  for (node = list; *node; node = &(*node)->next)
    {
      int cmp = 0;
      ContributionIndex ci;
      if (self->inadequacyNode < (*node)->inadequacyNode)
        cmp = 1;
      else if ((*node)->inadequacyNode < self->inadequacyNode)
        cmp = -1;
      else
        for (ci = 0;
             cmp == 0 && ci < self->inadequacyNode->contributionCount;
             ++ci)
          {
            if (AnnotationList__isContributionAlways (self, ci))
              {
                if (!AnnotationList__isContributionAlways (*node, ci))
                  cmp = -1;
              }
            else if (AnnotationList__isContributionAlways (*node, ci))
              cmp = 1;
            else
              {
                size_t item;
                for (item = 0; cmp == 0 && item < nitems; ++item)
                  {
                    if (!Sbitset__test (self->contributions[ci], item))
                      {
                        if (Sbitset__test ((*node)->contributions[ci], item))
                          cmp = -1;
                      }
                    else if (!Sbitset__test ((*node)->contributions[ci],
                                             item))
                      cmp = 1;
                  }
              }
          }
      if (cmp < 0)
        {
          self->next = *node;
          *node = self;
          break;
        }
      else if (cmp == 0)
        {
          self = NULL;
          break;
        }
    }
  if (!*node)
    *node = self;
  return self != NULL;
}
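
/* Illustration of the ordering above (hypothetical annotations a1 and a2
   attached to the same inadequacy node, in a state with two kernel items):
   if a1's first differing contribution is an "always" (a NULL bitset)
   while a2's is a potential-contribution, a1 sorts first.  If both are
   potential-contributions, the item-by-item comparison treats kernel item
   0 as the most significant bit, so a contribution whose bitset contains
   only item 1 sorts before one whose bitset contains only item 0.  */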

static bitset
AnnotationList__compute_shift_tokens (transitions *trans)
{
  bitset shift_tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;
  FOR_EACH_SHIFT (trans, i)
    bitset_set (shift_tokens, TRANSITION_SYMBOL (trans, i));
  return shift_tokens;
}

static bitset
AnnotationList__compute_conflicted_tokens (bitset shift_tokens,
                                           reductions *reds)
{
  bitset conflicted_tokens = bitset_create (ntokens, BITSET_FIXED);
  bitset conflicted_tokens_rule = bitset_create (ntokens, BITSET_FIXED);
  bitset tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;

  bitset_copy (tokens, shift_tokens);
  for (i = 0; i < reds->num; ++i)
    {
      bitset_and (conflicted_tokens_rule, tokens, reds->lookahead_tokens[i]);
      bitset_or (conflicted_tokens,
                 conflicted_tokens, conflicted_tokens_rule);
      bitset_or (tokens, tokens, reds->lookahead_tokens[i]);
      /* Check that rules are sorted on rule number or the next step in
         AnnotationList__compute_from_inadequacies will misbehave.  */
      aver (i == 0 || reds->rules[i-1] < reds->rules[i]);
    }

  bitset_free (tokens);
  bitset_free (conflicted_tokens_rule);

  return conflicted_tokens;
}
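
/* Worked example (hypothetical state): suppose the state shifts '+' and
   has two reductions, r1 with lookahead set {'+', '-'} and r2 with
   lookahead set {'-'}.  The loop starts with tokens = {'+'}; r1 then
   contributes the S/R conflict on '+' and grows tokens to {'+', '-'}; r2
   then contributes the R/R conflict on '-'.  The result is
   conflicted_tokens = {'+', '-'}: every token claimed by more than one
   action in the state.  */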

static bool
AnnotationList__compute_lhs_contributions (state *s, rule *the_rule,
                                           symbol_number conflicted_token,
                                           bitsetv follow_kernel_items,
                                           bitsetv always_follows,
                                           state ***predecessors,
                                           bitset **item_lookahead_sets,
                                           Sbitset *items,
                                           struct obstack
                                             *annotations_obstackp)
{
  goto_number lhs_goto = map_goto (s->number, the_rule->lhs->number);
  if (bitset_test (always_follows[lhs_goto], conflicted_token))
    return true;
  *items = Sbitset__new_on_obstack (s->nitems, annotations_obstackp);
  {
    bitset_iterator biter_item;
    bitset_bindex item;
    BITSET_FOR_EACH (biter_item, follow_kernel_items[lhs_goto], item, 0)
      if (ielr_item_has_lookahead (s, 0, item, conflicted_token,
                                   predecessors, item_lookahead_sets))
        Sbitset__set (*items, item);
  }
  return false;
}
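
/* Summary of the contract above: a true result means conflicted_token is
   in the always-follows set of the goto on the_rule's LHS, so the
   associated contribution is an "always" contribution and no item set is
   allocated.  A false result means the token's presence depends on kernel
   item lookaheads, and *items records which kernel items from
   follow_kernel_items[lhs_goto] can supply it.  */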

static void
AnnotationList__computePredecessorAnnotations (AnnotationList *self, state *s,
                                               bitsetv follow_kernel_items,
                                               bitsetv always_follows,
                                               state ***predecessors,
                                               bitset **item_lookahead_sets,
                                               AnnotationList
                                                 **annotation_lists,
                                               AnnotationIndex
                                                 *annotation_counts,
                                               struct obstack
                                                 *annotations_obstackp)
{
  state **predecessor;
  for (predecessor = predecessors[s->number]; *predecessor; ++predecessor)
    {
      AnnotationList *annotation_node =
        AnnotationList__alloc_on_obstack (
          self->inadequacyNode->contributionCount, annotations_obstackp);
      annotation_node->inadequacyNode = self->inadequacyNode;
      bool potential_contribution = false;
      bitset *lookaheads = NULL;
      {
        ContributionIndex ci;
        for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
          {
            symbol_number contribution_token =
              InadequacyList__getContributionToken (self->inadequacyNode, ci)
                ->number;
            if (AnnotationList__isContributionAlways (self, ci))
              {
                annotation_node->contributions[ci] = NULL;
                continue;
              }
            annotation_node->contributions[ci] =
              Sbitset__new_on_obstack ((*predecessor)->nitems,
                                       annotations_obstackp);
            {
              size_t predecessor_item = 0;
              Sbitset sbiter_item;
              Sbitset__Index self_item;
              SBITSET__FOR_EACH (self->contributions[ci], s->nitems,
                                 sbiter_item, self_item)
                {
                  /* If this kernel item is the beginning of a RHS, it must
                     be the kernel item in the start state, and so it has an
                     empty lookahead set.  Thus, it can't contribute to
                     inadequacies, and so it should never have been
                     identified as a contribution.  If, instead, this kernel
                     item is the successor of the start state's kernel item,
                     the lookahead set is still empty, and so it also should
                     never have been identified as a contribution.  This
                     situation is fortunate because we want to avoid the
                     - 2 below in both cases.  */
                  aver (s->items[self_item] > 1);
                  /* If this kernel item is next to the beginning of the
                     RHS, then check all of the predecessor's goto follows
                     for the LHS.  */
                  if (item_number_is_rule_number (ritem[s->items[self_item]
                                                        - 2]))
                    {
                      Sbitset items;
                      unsigned int rulei;
                      for (rulei = s->items[self_item];
                           !item_number_is_rule_number (ritem[rulei]);
                           ++rulei)
                        ;
                      if (AnnotationList__compute_lhs_contributions (
                            *predecessor,
                            &rules[item_number_as_rule_number (ritem[rulei])],
                            contribution_token,
                            follow_kernel_items, always_follows,
                            predecessors, item_lookahead_sets, &items,
                            annotations_obstackp))
                        {
                          obstack_free (annotations_obstackp,
                                        annotation_node->contributions[ci]);
                          annotation_node->contributions[ci] = NULL;
                          break;
                        }
                      else
                        {
                          Sbitset__or (annotation_node->contributions[ci],
                                       annotation_node->contributions[ci],
                                       items, (*predecessor)->nitems);
                          obstack_free (annotations_obstackp, items);
                        }
                    }
                  /* If this kernel item is later in the RHS, then check the
                     predecessor item's lookahead set.  */
                  else
                    {
                      /* We don't have to start the predecessor item search
                         at the beginning every time because items from both
                         states are sorted by their indices in ritem.  */
                      for (;
                           predecessor_item < (*predecessor)->nitems;
                           ++predecessor_item)
                        if ((*predecessor)->items[predecessor_item]
                            == s->items[self_item] - 1)
                          break;
                      aver (predecessor_item != (*predecessor)->nitems);
                      if (ielr_item_has_lookahead (*predecessor, 0,
                                                   predecessor_item,
                                                   contribution_token,
                                                   predecessors,
                                                   item_lookahead_sets))
                        Sbitset__set (annotation_node->contributions[ci],
                                      predecessor_item);
                    }
                }
            }
            if (annotation_node->contributions[ci])
              {
                Sbitset biter;
                Sbitset__Index i;
                SBITSET__FOR_EACH (annotation_node->contributions[ci],
                                   (*predecessor)->nitems, biter, i)
                  {
                    potential_contribution = true;
                    if (!lookaheads)
                      {
                        size_t j;
                        lookaheads = xnmalloc ((*predecessor)->nitems,
                                               sizeof *lookaheads);
                        for (j = 0; j < (*predecessor)->nitems; ++j)
                          lookaheads[j] = NULL;
                      }
                    if (!lookaheads[i])
                      lookaheads[i] = bitset_create (ntokens, BITSET_FIXED);
                    bitset_set (lookaheads[i], contribution_token);
                  }
              }
          }
      }

      /* If the predecessor has any contributions besides just "always" and
         "never" contributions:
         - If the dominant contribution is split-stable, the annotation
           could not affect merging on this predecessor state or its
           eventual predecessor states.  Moreover, all contributions that
           affect whether the dominant contribution remains dominant must
           be "always" or "never" contributions in order for the dominant
           contribution to be split-stable.  Thus, the dominant contribution
           computation result in eventual successor states will not be
           affected by lookaheads tracked for this predecessor state.
           (Also, as in the isocore compatibility test, we depend on the
           fact that isocores with equal dominant contributions will have
           the same dominant contribution when merged.  Otherwise, we might
           have to worry that the presence of a potential contribution
           might somehow be the culprit of that behavior and thus need to
           be tracked regardless of the split stability of the dominant
           contribution.)  Thus, go ahead and discard the annotation to
           save space now plus time during state splitting.
         - Otherwise, record the annotation, and compute any resulting
           annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (ContributionIndex__none
              != AnnotationList__computeDominantContribution (
                   annotation_node, (*predecessor)->nitems, lookaheads,
                   true))
            {
              obstack_free (annotations_obstackp, annotation_node);
              annotation_node = NULL;
            }
          {
            size_t i;
            for (i = 0; i < (*predecessor)->nitems; ++i)
              if (lookaheads[i])
                bitset_free (lookaheads[i]);
            free (lookaheads);
          }
          if (annotation_node)
            {
              if (AnnotationList__insertInto (annotation_node,
                                              &annotation_lists[(*predecessor)
                                                                ->number],
                                              (*predecessor)->nitems))
                {
                  ++annotation_counts[(*predecessor)->number];
                  AnnotationList__computePredecessorAnnotations (
                    annotation_node, *predecessor,
                    follow_kernel_items, always_follows, predecessors,
                    item_lookahead_sets, annotation_lists,
                    annotation_counts, annotations_obstackp);
                }
              else
                obstack_free (annotations_obstackp, annotation_node);
            }
        }
      else
        obstack_free (annotations_obstackp, annotation_node);
    }
}

void
AnnotationList__compute_from_inadequacies (state *s,
                                           bitsetv follow_kernel_items,
                                           bitsetv always_follows,
                                           state ***predecessors,
                                           bitset **item_lookahead_sets,
                                           InadequacyList **inadequacy_lists,
                                           AnnotationList **annotation_lists,
                                           AnnotationIndex *annotation_counts,
                                           ContributionIndex
                                             *max_contributionsp,
                                           struct obstack
                                             *annotations_obstackp)
{
  bitsetv all_lookaheads;
  bitset shift_tokens;
  bitset conflicted_tokens;
  bitset_iterator biter_conflict;
  bitset_bindex conflicted_token;

  /* Return an empty list if s->lookahead_tokens = NULL.  */
  if (s->consistent)
    return;

  all_lookaheads = bitsetv_create (s->nitems, ntokens, BITSET_FIXED);
  bitsetv_ones (all_lookaheads);
  shift_tokens = AnnotationList__compute_shift_tokens (s->transitions);
  conflicted_tokens =
    AnnotationList__compute_conflicted_tokens (shift_tokens, s->reductions);

  /* Add an inadequacy annotation for each conflicted_token.  */
  BITSET_FOR_EACH (biter_conflict, conflicted_tokens, conflicted_token, 0)
    {
      AnnotationList *annotation_node;
      /* FIXME: Would a BITSET_FRUGAL or BITSET_SPARSE be more efficient?
         Create it that way now, or convert it inside
         InadequacyList__new_conflict?  */
      bitset actions = bitset_create (s->reductions->num + 1, BITSET_FIXED);
      ContributionIndex contribution_count = 0;
      bool potential_contribution = false;

      /* Allocate the annotation node.  */
      {
        int rule_i;
        for (rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                           conflicted_token))
            ++contribution_count;
        if (bitset_test (shift_tokens, conflicted_token))
          ++contribution_count;
        annotation_node =
          AnnotationList__alloc_on_obstack (contribution_count,
                                            annotations_obstackp);
      }

      /* Add a contribution for each reduction that has conflicted_token as
         a lookahead.  */
      {
        ContributionIndex ci = 0;
        int item_i = 0;
        int rule_i;
        for (rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          {
            rule *the_rule = s->reductions->rules[rule_i];
            if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                             conflicted_token))
              {
                bitset_set (actions, rule_i);
                /* If this reduction is on a kernel item, just add it.  */
                if (!item_number_is_rule_number (the_rule->rhs[0]))
                  {
                    annotation_node->contributions[ci] =
                      Sbitset__new_on_obstack (s->nitems,
                                               annotations_obstackp);
                    /* Catch item_i up to rule_i.  This works because both
                       are sorted on rule number.  */
                    while (!item_number_is_rule_number (
                             ritem[s->items[item_i]])
                           || item_number_as_rule_number (
                                ritem[s->items[item_i]])
                              != the_rule->number)
                      {
                        ++item_i;
                        aver (item_i < s->nitems);
                      }
                    Sbitset__set (annotation_node->contributions[ci],
                                  item_i);
                  }
                /* Otherwise, add the kernel items whose lookahead sets
                   contribute the conflicted token to this reduction's
                   lookahead set.  */
                else if (AnnotationList__compute_lhs_contributions (
                           s, the_rule, conflicted_token,
                           follow_kernel_items, always_follows,
                           predecessors, item_lookahead_sets,
                           &annotation_node->contributions[ci],
                           annotations_obstackp))
                  {
                    annotation_node->contributions[ci++] = NULL;
                    continue;
                  }
                /* The lookahead token has to come from somewhere.  */
                aver (!Sbitset__isEmpty (annotation_node->contributions[ci],
                                         s->nitems));
                ++ci;
                potential_contribution = true;
              }
          }
      }

      /* If there are any contributions besides just "always"
         contributions:
         - If there's also a shift contribution, record it.
         - If the dominant contribution is split-stable, then the
           annotation could not affect merging, so go ahead and discard the
           annotation and the inadequacy to save space now plus time during
           state splitting.
         - Otherwise, record the annotation and the inadequacy, and compute
           any resulting annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (bitset_test (shift_tokens, conflicted_token))
            {
              bitset_set (actions, s->reductions->num);
              annotation_node->contributions[contribution_count - 1] = NULL;
            }
          {
            InadequacyList *conflict_node =
              InadequacyList__new_conflict (s, symbols[conflicted_token],
                                            actions);
            actions = NULL;
            annotation_node->inadequacyNode = conflict_node;
            if (ContributionIndex__none
                != AnnotationList__computeDominantContribution (
                     annotation_node, s->nitems, all_lookaheads, true))
              {
                obstack_free (annotations_obstackp, annotation_node);
                InadequacyList__delete (conflict_node);
              }
            else
              {
                InadequacyList__prependTo (conflict_node,
                                           &inadequacy_lists[s->number]);
                aver (AnnotationList__insertInto (
                        annotation_node, &annotation_lists[s->number],
                        s->nitems));
                /* This aver makes sure the
                   AnnotationList__computeDominantContribution check above
                   does discard annotations in the simplest case of an S/R
                   conflict with no token precedence.  */
                aver (!bitset_test (shift_tokens, conflicted_token)
                      || symbols[conflicted_token]->prec);
                ++annotation_counts[s->number];
                if (contribution_count > *max_contributionsp)
                  *max_contributionsp = contribution_count;
                AnnotationList__computePredecessorAnnotations (
                  annotation_node, s,
                  follow_kernel_items, always_follows, predecessors,
                  item_lookahead_sets, annotation_lists, annotation_counts,
                  annotations_obstackp);
              }
          }
        }
      else
        {
          bitset_free (actions);
          obstack_free (annotations_obstackp, annotation_node);
        }
    }

  bitsetv_free (all_lookaheads);
  bitset_free (shift_tokens);
  bitset_free (conflicted_tokens);
}
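
/* Usage sketch (hedged; in real builds the IELR driver in ielr.c owns this
   loop and the data it passes in): the function above is typically invoked
   once per state, accumulating per-state inadequacy and annotation lists.
   Roughly:

     ContributionIndex max_contributions = 0;
     state_number i;
     for (i = 0; i < nstates; ++i)
       AnnotationList__compute_from_inadequacies (
         states[i], follow_kernel_items, always_follows, predecessors,
         item_lookahead_sets, inadequacy_lists, annotation_lists,
         annotation_counts, &max_contributions, &annotations_obstack);

   Every array named here is assumed to have been allocated and computed by
   the caller beforehand.  */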

void
AnnotationList__debug (AnnotationList const *self, size_t nitems, int spaces)
{
  AnnotationList const *a;
  AnnotationIndex ai;
  for (a = self, ai = 0; a; a = a->next, ++ai)
    {
      {
        int j;
        for (j = 0; j < spaces; ++j)
          putc (' ', stderr);
      }
      fprintf (stderr, "Annotation %d (manifesting state %d):\n",
               ai, a->inadequacyNode->manifestingState->number);
      {
        ContributionIndex ci;
        bitset_bindex rulei = 0; /* init suppresses compiler warning */
        rulei = bitset_first (a->inadequacyNode->inadequacy.conflict.actions);
        for (ci = 0; ci < a->inadequacyNode->contributionCount; ++ci)
          {
            symbol_number token =
              InadequacyList__getContributionToken (a->inadequacyNode, ci)
                ->number;
            {
              int j;
              for (j = 0; j < spaces+2; ++j)
                putc (' ', stderr);
            }
            if (ci == InadequacyList__getShiftContributionIndex (
                        a->inadequacyNode))
              fprintf (stderr, "Contributes shift of token %d.\n", token);
            else
              {
                fprintf (stderr, "Contributes token %d", token);
                aver (rulei != BITSET_BINDEX_MAX);
                fprintf (stderr, " as lookahead, rule number %d",
                         a->inadequacyNode->manifestingState
                           ->reductions->rules[rulei]->number);
                rulei =
                  bitset_next (a->inadequacyNode->inadequacy.conflict.actions,
                               rulei+1);
                if (AnnotationList__isContributionAlways (a, ci))
                  fprintf (stderr, " always.");
                else
                  {
                    fprintf (stderr, ", items: ");
                    Sbitset__fprint (a->contributions[ci], nitems, stderr);
                  }
                fprintf (stderr, "\n");
              }
          }
      }
    }
}
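
/* For reference, the output produced above looks roughly like this
   (hypothetical state, token, and rule numbers; the item list rendering
   depends on Sbitset__fprint):

     Annotation 0 (manifesting state 5):
       Contributes shift of token 4.
       Contributes token 4 as lookahead, rule number 2, items: ...

   with " always." replacing the ", items: ..." tail when the contribution
   is an "always" contribution.  */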

void
AnnotationList__computeLookaheadFilter (AnnotationList const *self,
                                        size_t nitems,
                                        bitsetv lookahead_filter)
{
  bitsetv_zero (lookahead_filter);
  for (; self; self = self->next)
    {
      ContributionIndex ci;
      for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
        if (!AnnotationList__isContributionAlways (self, ci))
          {
            Sbitset__Index item;
            Sbitset biter;
            symbol_number token =
              InadequacyList__getContributionToken (self->inadequacyNode, ci)
                ->number;
            SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
              bitset_set (lookahead_filter[item], token);
          }
    }
}
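
/* Net effect: after the call above, lookahead_filter[item] has the bit for
   token t set exactly when some potential contribution in the annotation
   list depends on whether t is in kernel item item's lookahead set.
   Lookahead bits outside this filter cannot influence any annotation in
   the list.  */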

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - \c nitems is the number of kernel items in the LR(0) state that
 *     \c self annotates.
 *   - \c lookaheads describes the lookahead sets on the kernel items of
 *     some isocore of the LR(0) state that \c self annotates.  Either:
 *     - <tt>lookaheads = NULL</tt> only if the lookahead set on every
 *       kernel item is empty.
 *     - For any <tt>0 <= i < nitems</tt>, <tt>lookaheads[i]</tt> is
 *       either:
 *       - \c NULL only if the lookahead set on kernel item \c i is empty.
 *       - The (possibly empty) lookahead set on kernel item \c i.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self is made by the
 *     state described by \c lookaheads.
 */
static bool
AnnotationList__stateMakesContribution (AnnotationList const *self,
                                        size_t nitems, ContributionIndex ci,
                                        bitset *lookaheads)
{
  if (AnnotationList__isContributionAlways (self, ci))
    return true;
  if (!lookaheads)
    return false;
  {
    symbol_number token =
      InadequacyList__getContributionToken (self->inadequacyNode, ci)->number;
    Sbitset__Index item;
    Sbitset biter;
    SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
      if (lookaheads[item] && bitset_test (lookaheads[item], token))
        return true;
  }
  return false;
}

ContributionIndex
AnnotationList__computeDominantContribution (AnnotationList const *self,
                                             size_t nitems,
                                             bitset *lookaheads,
                                             bool require_split_stable)
{
  symbol *token;
  ContributionIndex const ci_shift =
    InadequacyList__getShiftContributionIndex (self->inadequacyNode);

  token = self->inadequacyNode->inadequacy.conflict.token;

  /* S/R conflict.  */
  if (ci_shift != ContributionIndex__none)
    {
      bool find_stable_domination_over_shift = false;
      bool find_stable_error_action_domination = false;
      {
        ContributionIndex ci;
        int actioni;
        ContributionIndex ci_rr_dominator = ContributionIndex__none;
        int shift_precedence = token->prec;

        /* If the token has no precedence set, shift is always chosen.  */
        if (!shift_precedence)
          return ci_shift;

        /* Figure out which reductions contribute, which of those would
           dominate in an R/R comparison, and whether any reduction
           dominates the shift so that the R/R comparison is actually
           needed.  */
        for (ci = 0,
               actioni = bitset_first (self->inadequacyNode->inadequacy
                                         .conflict.actions);
             ci < self->inadequacyNode->contributionCount;
             ++ci,
               actioni = bitset_next (self->inadequacyNode->inadequacy
                                        .conflict.actions, actioni+1))
          {
            int reduce_precedence = 0;
            if (ci == ci_shift)
              continue;
            {
              rule *r = self->inadequacyNode->manifestingState
                          ->reductions->rules[actioni];
              if (r->prec)
                reduce_precedence = r->prec->prec;
            }
            /* If there's no need to check whether this reduction actually
               contributes because the shift eliminates it from the R/R
               comparison anyway, continue to the next reduction.  */
            if (reduce_precedence
                && (reduce_precedence < shift_precedence
                    || (reduce_precedence == shift_precedence
                        && token->assoc == right_assoc)))
              continue;
            if (!AnnotationList__stateMakesContribution (self, nitems, ci,
                                                         lookaheads))
              continue;
            /* This uneliminated reduction contributes, so see if it can
               cause an error action.  */
            if (reduce_precedence == shift_precedence
                && token->assoc == non_assoc)
              {
                /* It's not possible to find split-stable domination over
                   shift after a potential %nonassoc.  */
                if (find_stable_domination_over_shift)
                  return ContributionIndex__none;
                if (!require_split_stable
                    || AnnotationList__isContributionAlways (self, ci))
                  return ContributionIndex__error_action;
                find_stable_error_action_domination = true;
              }
            /* Consider this uneliminated contributing reduction in the R/R
               comparison.  */
            if (ci_rr_dominator == ContributionIndex__none)
              ci_rr_dominator = ci;
            /* If precedence is set for this uneliminated contributing
               reduction, it dominates the shift, so try to figure out
               which reduction dominates the R/R comparison.  */
            if (reduce_precedence)
              {
                /* It's not possible to find split-stable error action
                   domination after a potential reduction.  */
                if (find_stable_error_action_domination)
                  return ContributionIndex__none;
                if (!require_split_stable)
                  return ci_rr_dominator;
                if (!AnnotationList__isContributionAlways (self,
                                                           ci_rr_dominator))
                  return ContributionIndex__none;
                if (AnnotationList__isContributionAlways (self, ci))
                  return ci_rr_dominator;
                find_stable_domination_over_shift = true;
              }
          }
      }
      if (find_stable_domination_over_shift
          || find_stable_error_action_domination)
        return ContributionIndex__none;
      /* No reduce or error action domination found, so shift dominates.  */
      return ci_shift;
    }

  /* R/R conflict, so the reduction with the lowest rule number dominates.
     Fortunately, contributions are sorted by rule number.  */
  {
    ContributionIndex ci;
    for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
      if (AnnotationList__stateMakesContribution (self, nitems, ci,
                                                  lookaheads))
        {
          if (require_split_stable
              && !AnnotationList__isContributionAlways (self, ci))
            return ContributionIndex__none;
          return ci;
        }
  }
  return ContributionIndex__none;
}
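
/* Worked example (hypothetical grammar fragment): consider the classic S/R
   conflict on '+' in

     %left '+'
     %%
     e: e '+' e ;

   Here the reduction's precedence equals shift_precedence and '+' is left
   associative, so the reduction is not eliminated from the comparison; it
   contributes, has precedence set, and thus dominates the shift, so its
   contribution index is returned.  With %right '+', the reduction would be
   eliminated and ci_shift returned; with %nonassoc '+',
   ContributionIndex__error_action would be returned (split stability
   permitting).  */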