/* IELR's inadequacy annotation list.

   Copyright (C) 2009, 2010 Free Software Foundation, Inc.

   This file is part of Bison, the GNU Compiler Compiler.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <config.h>
#include "system.h"

#include "AnnotationList.h"
#include "lalr.h"
#include "ielr.h"

/**
 * \pre
 *   - <tt>annotations_obstackp != NULL</tt>.
 * \post
 *   - \c result is a new \c AnnotationList with one node whose:
 *     - \c inadequacyNode member is \c NULL.
 *     - \c contributions member is allocated with \c contribution_count
 *       uninitialized elements.
 *   - All memory was allocated on \c annotations_obstackp.
 */
static AnnotationList*
AnnotationList__alloc_on_obstack (ContributionIndex contribution_count,
                                  struct obstack *annotations_obstackp)
{
  AnnotationList *result;
  size_t contributions_size =
    contribution_count * sizeof result->contributions[0];
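  /* contributions is the node's trailing array member, so a single obstack
     allocation sized via offsetof covers both the node and its
     contribution_count array elements.  */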
  result = obstack_alloc (annotations_obstackp,
                          offsetof (AnnotationList, contributions)
                          + contributions_size);
  result->next = NULL;
  result->inadequacyNode = NULL;
  return result;
}

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self represents an
 *     "always" contribution.
 */
static bool
AnnotationList__isContributionAlways (AnnotationList const *self,
                                      ContributionIndex ci)
{
  aver (0 <= ci && ci < self->inadequacyNode->contributionCount);
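  /* A NULL bitset in contributions encodes an "always" contribution.  */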
  return self->contributions[ci] == NULL;
}

/**
 * \pre
 *   - \c self is a single node.
 *   - \c self annotates the same state as every other node in \c list, and
 *     that state has \c nitems kernel items.
 * \post
 *   - If the list \c list already contains an identical annotation to
 *     \c self, \c self was discarded, \c result is false, and the caller is
 *     responsible for the memory of \c self.
 *   - Otherwise, \c list now contains the node \c self, \c result is true,
 *     and \c list assumes responsibility for the memory of \c self.
 *   - The sort in \c list is:
 *     - Sort in reverse order on the unique ID of the associated
 *       inadequacy node.  Because these IDs are assigned in ascending
 *       order, this should mean that the insertion position within an
 *       annotation list is usually near the beginning with other
 *       annotations associated with the same inadequacy.
 *     - Next, sort on the first contribution that is different as follows:
 *       - Sort an always-contribution before a never-contribution before a
 *         potential-contribution.
 *       - Two always-contributions are identical.
 *       - Two never-contributions are identical.
 *       - For two potential-contributions, sort on the contributions'
 *         kernel item bitsets interpreted as binary numbers.
 *     - The sorting has a few effects:
 *       - It accelerates elimination of identical annotations during
 *         insertion.
 *       - It determines how the output of \c AnnotationList__debug is
 *         sorted.
 *       - Other than that, it's probably not important.
 */
static bool
AnnotationList__insertInto (AnnotationList *self, AnnotationList **list,
                            size_t nitems)
{
  AnnotationList **node;
  for (node = list; *node; node = &(*node)->next)
    {
      int cmp = 0;
      ContributionIndex ci;
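      /* First key: descending order on the inadequacy node's unique ID,
         the "reverse order" described in the doc comment above.  */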
      if (self->inadequacyNode->id < (*node)->inadequacyNode->id)
        cmp = 1;
      else if ((*node)->inadequacyNode->id < self->inadequacyNode->id)
        cmp = -1;
      else
        for (ci = 0;
             cmp == 0 && ci < self->inadequacyNode->contributionCount;
             ++ci)
          {
            if (AnnotationList__isContributionAlways (self, ci))
              {
                if (!AnnotationList__isContributionAlways (*node, ci))
                  cmp = -1;
              }
            else if (AnnotationList__isContributionAlways (*node, ci))
              cmp = 1;
            else
              {
                size_t item;
                for (item = 0; cmp == 0 && item < nitems; ++item)
                  {
                    if (!Sbitset__test (self->contributions[ci], item))
                      {
                        if (Sbitset__test ((*node)->contributions[ci], item))
                          cmp = -1;
                      }
                    else if (!Sbitset__test ((*node)->contributions[ci],
                                             item))
                      cmp = 1;
                  }
              }
          }
      if (cmp < 0)
        {
          self->next = *node;
          *node = self;
          break;
        }
      else if (cmp == 0)
        {
          self = NULL;
          break;
        }
    }
  if (!*node)
    *node = self;
  return self != NULL;
}

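/**
 * \pre
 *   - \c trans is a state's transition table.
 * \post
 *   - \c result = the set of tokens on which the state shifts.  The caller
 *     is responsible for freeing the bitset.
 */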
static bitset
AnnotationList__compute_shift_tokens (transitions *trans)
{
  bitset shift_tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;
  FOR_EACH_SHIFT (trans, i)
    bitset_set (shift_tokens, TRANSITION_SYMBOL (trans, i));
  return shift_tokens;
}

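/**
 * \pre
 *   - \c shift_tokens is the set of tokens on which the state shifts.
 *   - \c reds is the state's reduction table, sorted on rule number.
 * \post
 *   - \c result = the set of tokens appearing in at least two of the
 *     state's actions (the shift plus the reductions); that is, the tokens
 *     on which the state's actions conflict.  The caller is responsible
 *     for freeing the bitset.
 */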
static bitset
AnnotationList__compute_conflicted_tokens (bitset shift_tokens,
                                           reductions *reds)
{
  bitset conflicted_tokens = bitset_create (ntokens, BITSET_FIXED);
  bitset conflicted_tokens_rule = bitset_create (ntokens, BITSET_FIXED);
  bitset tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;

  bitset_copy (tokens, shift_tokens);
  for (i = 0; i < reds->num; ++i)
    {
      bitset_and (conflicted_tokens_rule, tokens, reds->lookahead_tokens[i]);
      bitset_or (conflicted_tokens,
                 conflicted_tokens, conflicted_tokens_rule);
      bitset_or (tokens, tokens, reds->lookahead_tokens[i]);
      /* Check that rules are sorted on rule number or the next step in
         AnnotationList__compute_from_inadequacies will misbehave.  */
      aver (i == 0 || reds->rules[i-1] < reds->rules[i]);
    }

  bitset_free (tokens);
  bitset_free (conflicted_tokens_rule);

  return conflicted_tokens;
}

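/**
 * \pre
 *   - \c the_rule is reduced in state \c s when \c conflicted_token is the
 *     lookahead.
 * \post
 *   - \c result = true iff \c conflicted_token is an always-follow of the
 *     goto on \c the_rule's LHS from \c s, in which case the reduction is
 *     an "always" contribution and \c *items is not assigned.
 *   - Otherwise, \c *items is a new \c Sbitset on \c annotations_obstackp
 *     containing the kernel items of \c s whose lookahead sets contribute
 *     \c conflicted_token to the reduction's lookahead set.
 */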
static bool
AnnotationList__compute_lhs_contributions (state *s, rule *the_rule,
                                           symbol_number conflicted_token,
                                           bitsetv follow_kernel_items,
                                           bitsetv always_follows,
                                           state ***predecessors,
                                           bitset **item_lookahead_sets,
                                           Sbitset *items,
                                           struct obstack
                                             *annotations_obstackp)
{
  goto_number lhs_goto = map_goto (s->number, the_rule->lhs->number);
  if (bitset_test (always_follows[lhs_goto], conflicted_token))
    return true;
  *items = Sbitset__new_on_obstack (s->nitems, annotations_obstackp);
  {
    bitset_iterator biter_item;
    bitset_bindex item;
    BITSET_FOR_EACH (biter_item, follow_kernel_items[lhs_goto], item, 0)
      if (ielr_item_has_lookahead (s, 0, item, conflicted_token,
                                   predecessors, item_lookahead_sets))
        Sbitset__set (*items, item);
  }
  return false;
}

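/**
 * \pre
 *   - \c self is an annotation belonging to state \c s.
 * \post
 *   - \c self was translated onto the kernel items of each predecessor of
 *     \c s.  Unless a translated annotation's dominant contribution is
 *     split-stable or the annotation duplicates one already recorded, it
 *     was inserted into the predecessor's annotation list, counted in
 *     \c annotation_counts, and propagated recursively to that
 *     predecessor's own predecessors.
 */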
static void
AnnotationList__computePredecessorAnnotations (AnnotationList *self, state *s,
                                               bitsetv follow_kernel_items,
                                               bitsetv always_follows,
                                               state ***predecessors,
                                               bitset **item_lookahead_sets,
                                               AnnotationList
                                                 **annotation_lists,
                                               AnnotationIndex
                                                 *annotation_counts,
                                               struct obstack
                                                 *annotations_obstackp)
{
  state **predecessor;
  for (predecessor = predecessors[s->number]; *predecessor; ++predecessor)
    {
      AnnotationList *annotation_node =
        AnnotationList__alloc_on_obstack (
          self->inadequacyNode->contributionCount, annotations_obstackp);
      annotation_node->inadequacyNode = self->inadequacyNode;
      bool potential_contribution = false;
      bitset *lookaheads = NULL;
      {
        ContributionIndex ci;
        for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
          {
            symbol_number contribution_token =
              InadequacyList__getContributionToken (self->inadequacyNode, ci)
                ->number;
            if (AnnotationList__isContributionAlways (self, ci))
              {
                annotation_node->contributions[ci] = NULL;
                continue;
              }
            annotation_node->contributions[ci] =
              Sbitset__new_on_obstack ((*predecessor)->nitems,
                                       annotations_obstackp);
            {
              size_t predecessor_item = 0;
              Sbitset sbiter_item;
              Sbitset__Index self_item;
              SBITSET__FOR_EACH (self->contributions[ci], s->nitems,
                                 sbiter_item, self_item)
                {
                  /* If this kernel item is the beginning of a RHS, it must
                     be the kernel item in the start state, and so it has an
                     empty lookahead set.  Thus, it can't contribute to
                     inadequacies, and so it should never have been
                     identified as a contribution.  If, instead, this kernel
                     item is the successor of the start state's kernel item,
                     the lookahead set is still empty, and so it also should
                     never have been identified as a contribution.  This
                     situation is fortunate because we want to avoid the
                     - 2 below in both cases.  */
                  aver (s->items[self_item] > 1);
                  /* If this kernel item is next to the beginning of the
                     RHS, then check all of the predecessor's goto follows
                     for the LHS.  */
                  if (item_number_is_rule_number (ritem[s->items[self_item]
                                                        - 2]))
                    {
                      Sbitset items;
                      unsigned int rulei;
                      for (rulei = s->items[self_item];
                           !item_number_is_rule_number (ritem[rulei]);
                           ++rulei)
                        ;
                      if (AnnotationList__compute_lhs_contributions (
                            *predecessor,
                            &rules[item_number_as_rule_number (ritem[rulei])],
                            contribution_token,
                            follow_kernel_items, always_follows,
                            predecessors, item_lookahead_sets, &items,
                            annotations_obstackp))
                        {
                          obstack_free (annotations_obstackp,
                                        annotation_node->contributions[ci]);
                          annotation_node->contributions[ci] = NULL;
                          break;
                        }
                      else
                        {
                          Sbitset__or (annotation_node->contributions[ci],
                                       annotation_node->contributions[ci],
                                       items, (*predecessor)->nitems);
                          obstack_free (annotations_obstackp, items);
                        }
                    }
                  /* If this kernel item is later in the RHS, then check the
                     predecessor item's lookahead set.  */
                  else
                    {
                      /* We don't have to start the predecessor item search
                         at the beginning every time because items from both
                         states are sorted by their indices in ritem.  */
                      for (;
                           predecessor_item < (*predecessor)->nitems;
                           ++predecessor_item)
                        if ((*predecessor)->items[predecessor_item]
                            == s->items[self_item] - 1)
                          break;
                      aver (predecessor_item != (*predecessor)->nitems);
                      if (ielr_item_has_lookahead (*predecessor, 0,
                                                   predecessor_item,
                                                   contribution_token,
                                                   predecessors,
                                                   item_lookahead_sets))
                        Sbitset__set (annotation_node->contributions[ci],
                                      predecessor_item);
                    }
                }
            }
            if (annotation_node->contributions[ci])
              {
                Sbitset biter;
                Sbitset__Index i;
                SBITSET__FOR_EACH (annotation_node->contributions[ci],
                                   (*predecessor)->nitems, biter, i)
                  {
                    potential_contribution = true;
                    if (!lookaheads)
                      {
                        size_t j;
                        lookaheads = xnmalloc ((*predecessor)->nitems,
                                               sizeof *lookaheads);
                        for (j = 0; j < (*predecessor)->nitems; ++j)
                          lookaheads[j] = NULL;
                      }
                    if (!lookaheads[i])
                      lookaheads[i] = bitset_create (ntokens, BITSET_FIXED);
                    bitset_set (lookaheads[i], contribution_token);
                  }
              }
          }
      }

      /* If the predecessor has any contributions besides just "always" and
         "never" contributions:
         - If the dominant contribution is split-stable, the annotation
           could not affect merging on this predecessor state or its
           eventual predecessor states.  Moreover, all contributions that
           affect whether the dominant contribution remains dominant must
           be "always" or "never" contributions in order for the dominant
           contribution to be split-stable.  Thus, the dominant contribution
           computation result in eventual successor states will not be
           affected by lookaheads tracked for this predecessor state.
           (Also, as in the isocore compatibility test, we depend on the
           fact that isocores with equal dominant contributions will have
           the same dominant contribution when merged.  Otherwise, we might
           have to worry that the presence of a potential contribution
           might somehow be the culprit of that behavior and thus need to
           be tracked regardless of the split stability of the dominant
           contribution.)  Thus, go ahead and discard the annotation to
           save space now plus time during state splitting.
         - Otherwise, record the annotation, and compute any resulting
           annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (ContributionIndex__none
              != AnnotationList__computeDominantContribution (
                   annotation_node, (*predecessor)->nitems, lookaheads,
                   true))
            {
              obstack_free (annotations_obstackp, annotation_node);
              annotation_node = NULL;
            }
          {
            size_t i;
            for (i = 0; i < (*predecessor)->nitems; ++i)
              if (lookaheads[i])
                bitset_free (lookaheads[i]);
            free (lookaheads);
          }
          if (annotation_node)
            {
              if (AnnotationList__insertInto (annotation_node,
                                              &annotation_lists[(*predecessor)
                                                                ->number],
                                              (*predecessor)->nitems))
                {
                  ++annotation_counts[(*predecessor)->number];
                  AnnotationList__computePredecessorAnnotations (
                    annotation_node, *predecessor,
                    follow_kernel_items, always_follows, predecessors,
                    item_lookahead_sets, annotation_lists,
                    annotation_counts, annotations_obstackp);
                }
              else
                obstack_free (annotations_obstackp, annotation_node);
            }
        }
      else
        obstack_free (annotations_obstackp, annotation_node);
    }
}

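/**
 * \pre
 *   - \c s is a state whose inadequacies should be analyzed.
 * \post
 *   - For each token on which the actions of \c s conflict, unless the
 *     conflict's resolution is split-stable (and so cannot be affected by
 *     merging isocores), an \c InadequacyList node was prepended to
 *     <tt>inadequacy_lists[s->number]</tt>, an annotation was inserted into
 *     <tt>annotation_lists[s->number]</tt>, and any annotations it implies
 *     for predecessor states were computed.
 *   - \c *max_contributionsp is at least the contribution count of every
 *     annotation recorded this way.
 */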
void
AnnotationList__compute_from_inadequacies (
  state *s, bitsetv follow_kernel_items, bitsetv always_follows,
  state ***predecessors, bitset **item_lookahead_sets,
  InadequacyList **inadequacy_lists, AnnotationList **annotation_lists,
  AnnotationIndex *annotation_counts,
  ContributionIndex *max_contributionsp,
  struct obstack *annotations_obstackp,
  InadequacyListNodeCount *inadequacy_list_node_count)
{
  bitsetv all_lookaheads;
  bitset shift_tokens;
  bitset conflicted_tokens;
  bitset_iterator biter_conflict;
  bitset_bindex conflicted_token;

  /* Return an empty list if s->lookahead_tokens = NULL.  */
  if (s->consistent)
    return;

  all_lookaheads = bitsetv_create (s->nitems, ntokens, BITSET_FIXED);
  bitsetv_ones (all_lookaheads);
  shift_tokens = AnnotationList__compute_shift_tokens (s->transitions);
  conflicted_tokens =
    AnnotationList__compute_conflicted_tokens (shift_tokens, s->reductions);

  /* Add an inadequacy annotation for each conflicted_token.  */
  BITSET_FOR_EACH (biter_conflict, conflicted_tokens, conflicted_token, 0)
    {
      AnnotationList *annotation_node;
      /* FIXME: Would a BITSET_FRUGAL or BITSET_SPARSE be more efficient?
         Should the conversion happen now or inside
         InadequacyList__new_conflict?  */
      bitset actions = bitset_create (s->reductions->num + 1, BITSET_FIXED);
      ContributionIndex contribution_count = 0;
      bool potential_contribution = false;

      /* Allocate the annotation node.  */
      {
        int rule_i;
        for (rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                           conflicted_token))
            ++contribution_count;
        if (bitset_test (shift_tokens, conflicted_token))
          ++contribution_count;
        annotation_node =
          AnnotationList__alloc_on_obstack (contribution_count,
                                            annotations_obstackp);
      }

      /* Add a contribution for each reduction that has conflicted_token as
         a lookahead.  */
      {
        ContributionIndex ci = 0;
        int item_i = 0;
        int rule_i;
        for (rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          {
            rule *the_rule = s->reductions->rules[rule_i];
            if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                             conflicted_token))
              {
                bitset_set (actions, rule_i);
                /* If this reduction is on a kernel item, just add it.  */
                if (!item_number_is_rule_number (the_rule->rhs[0]))
                  {
                    annotation_node->contributions[ci] =
                      Sbitset__new_on_obstack (s->nitems,
                                               annotations_obstackp);
                    /* Catch item_i up to rule_i.  This works because both
                       are sorted on rule number.  */
                    while (!item_number_is_rule_number (
                             ritem[s->items[item_i]])
                           || item_number_as_rule_number (
                                ritem[s->items[item_i]])
                              != the_rule->number)
                      {
                        ++item_i;
                        aver (item_i < s->nitems);
                      }
                    Sbitset__set (annotation_node->contributions[ci],
                                  item_i);
                  }
                /* Otherwise, add the kernel items whose lookahead sets
                   contribute the conflicted token to this reduction's
                   lookahead set.  */
                else if (AnnotationList__compute_lhs_contributions (
                           s, the_rule, conflicted_token,
                           follow_kernel_items, always_follows,
                           predecessors, item_lookahead_sets,
                           &annotation_node->contributions[ci],
                           annotations_obstackp))
                  {
                    annotation_node->contributions[ci++] = NULL;
                    continue;
                  }
                /* The lookahead token has to come from somewhere.  */
                aver (!Sbitset__isEmpty (annotation_node->contributions[ci],
                                         s->nitems));
                ++ci;
                potential_contribution = true;
              }
          }
      }

      /* If there are any contributions besides just "always" contributions:
         - If there's also a shift contribution, record it.
         - If the dominant contribution is split-stable, then the annotation
           could not affect merging, so go ahead and discard the annotation
           and the inadequacy to save space now plus time during state
           splitting.
         - Otherwise, record the annotation and the inadequacy, and compute
           any resulting annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (bitset_test (shift_tokens, conflicted_token))
            {
              bitset_set (actions, s->reductions->num);
              annotation_node->contributions[contribution_count - 1] = NULL;
            }
          {
            InadequacyList *conflict_node =
              InadequacyList__new_conflict (
                s, symbols[conflicted_token], actions,
                inadequacy_list_node_count);
            actions = NULL;
            annotation_node->inadequacyNode = conflict_node;
            if (ContributionIndex__none
                != AnnotationList__computeDominantContribution (
                     annotation_node, s->nitems, all_lookaheads, true))
              {
                obstack_free (annotations_obstackp, annotation_node);
                InadequacyList__delete (conflict_node);
              }
            else
              {
                InadequacyList__prependTo (conflict_node,
                                           &inadequacy_lists[s->number]);
                aver (AnnotationList__insertInto (
                        annotation_node, &annotation_lists[s->number],
                        s->nitems));
                /* This aver makes sure the
                   AnnotationList__computeDominantContribution check above
                   does discard annotations in the simplest case of an S/R
                   conflict with no token precedence.  */
                aver (!bitset_test (shift_tokens, conflicted_token)
                      || symbols[conflicted_token]->prec);
                ++annotation_counts[s->number];
                if (contribution_count > *max_contributionsp)
                  *max_contributionsp = contribution_count;
                AnnotationList__computePredecessorAnnotations (
                  annotation_node, s,
                  follow_kernel_items, always_follows, predecessors,
                  item_lookahead_sets, annotation_lists, annotation_counts,
                  annotations_obstackp);
              }
          }
        }
      else
        {
          bitset_free (actions);
          obstack_free (annotations_obstackp, annotation_node);
        }
    }

  bitsetv_free (all_lookaheads);
  bitset_free (shift_tokens);
  bitset_free (conflicted_tokens);
}

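/**
 * \pre
 *   - \c self annotates a state with \c nitems kernel items.
 * \post
 *   - A human-readable description of every annotation in \c self was
 *     written to \c stderr, each line indented by \c spaces columns.
 */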
void
AnnotationList__debug (AnnotationList const *self, size_t nitems, int spaces)
{
  AnnotationList const *a;
  AnnotationIndex ai;
  for (a = self, ai = 0; a; a = a->next, ++ai)
    {
      {
        int j;
        for (j = 0; j < spaces; ++j)
          putc (' ', stderr);
      }
      fprintf (stderr, "Annotation %d (manifesting state %d):\n",
               ai, a->inadequacyNode->manifestingState->number);
      {
        ContributionIndex ci;
        bitset_bindex rulei = 0; /* init suppresses compiler warning */
        rulei = bitset_first (a->inadequacyNode->inadequacy.conflict.actions);
        for (ci = 0; ci < a->inadequacyNode->contributionCount; ++ci)
          {
            symbol_number token =
              InadequacyList__getContributionToken (a->inadequacyNode, ci)
                ->number;
            {
              int j;
              for (j = 0; j < spaces+2; ++j)
                putc (' ', stderr);
            }
            if (ci == InadequacyList__getShiftContributionIndex (
                        a->inadequacyNode))
              fprintf (stderr, "Contributes shift of token %d.\n", token);
            else
              {
                fprintf (stderr, "Contributes token %d", token);
                aver (rulei != BITSET_BINDEX_MAX);
                fprintf (stderr, " as lookahead, rule number %d",
                         a->inadequacyNode->manifestingState
                           ->reductions->rules[rulei]->number);
                rulei =
                  bitset_next (a->inadequacyNode->inadequacy.conflict.actions,
                               rulei+1);
                if (AnnotationList__isContributionAlways (a, ci))
                  fprintf (stderr, " always.");
                else
                  {
                    fprintf (stderr, ", items: ");
                    Sbitset__fprint (a->contributions[ci], nitems, stderr);
                  }
                fprintf (stderr, "\n");
              }
          }
      }
    }
}

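/**
 * \pre
 *   - \c self annotates a state with \c nitems kernel items.
 *   - \c lookahead_filter has a row of \c ntokens bits for each of those
 *     kernel items.
 * \post
 *   - <tt>lookahead_filter[item]</tt> contains token \c t iff some
 *     potential contribution in \c self depends on whether kernel item
 *     \c item has \c t in its lookahead set.
 */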
void
AnnotationList__computeLookaheadFilter (AnnotationList const *self,
                                        size_t nitems,
                                        bitsetv lookahead_filter)
{
  bitsetv_zero (lookahead_filter);
  for (; self; self = self->next)
    {
      ContributionIndex ci;
      for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
        if (!AnnotationList__isContributionAlways (self, ci))
          {
            Sbitset__Index item;
            Sbitset biter;
            symbol_number token =
              InadequacyList__getContributionToken (self->inadequacyNode, ci)
                ->number;
            SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
              bitset_set (lookahead_filter[item], token);
          }
    }
}

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - \c nitems is the number of kernel items in the LR(0) state that
 *     \c self annotates.
 *   - \c lookaheads describes the lookahead sets on the kernel items of
 *     some isocore of the LR(0) state that \c self annotates.  Either:
 *     - <tt>lookaheads = NULL</tt> only if the lookahead set on every
 *       kernel item is empty.
 *     - For any <tt>0 <= i < nitems</tt>, <tt>lookaheads[i]</tt> is either:
 *       - \c NULL only if the lookahead set on kernel item \c i is empty.
 *       - The (possibly empty) lookahead set on kernel item \c i.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self is made by the
 *     state described by \c lookaheads.
 */
static bool
AnnotationList__stateMakesContribution (AnnotationList const *self,
                                        size_t nitems, ContributionIndex ci,
                                        bitset *lookaheads)
{
  if (AnnotationList__isContributionAlways (self, ci))
    return true;
  if (!lookaheads)
    return false;
  {
    symbol_number token =
      InadequacyList__getContributionToken (self->inadequacyNode, ci)
        ->number;
    Sbitset__Index item;
    Sbitset biter;
    SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
      if (lookaheads[item] && bitset_test (lookaheads[item], token))
        return true;
  }
  return false;
}

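/**
 * \pre
 *   - \c self, \c nitems, and \c lookaheads are as for
 *     \c AnnotationList__stateMakesContribution.
 * \post
 *   - \c result = the index of the contribution that dominates the
 *     conflict in the isocore described by \c lookaheads, possibly
 *     \c ContributionIndex__error_action for a nonassociative S/R
 *     conflict, or \c ContributionIndex__none if no contribution is made
 *     or, when \c require_split_stable, if the dominant contribution could
 *     change under further state splitting.
 */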
ContributionIndex
AnnotationList__computeDominantContribution (AnnotationList const *self,
                                             size_t nitems,
                                             bitset *lookaheads,
                                             bool require_split_stable)
{
  symbol *token;
  ContributionIndex const ci_shift =
    InadequacyList__getShiftContributionIndex (self->inadequacyNode);

  token = self->inadequacyNode->inadequacy.conflict.token;

  /* S/R conflict.  */
  if (ci_shift != ContributionIndex__none)
    {
      bool find_stable_domination_over_shift = false;
      bool find_stable_error_action_domination = false;
      {
        ContributionIndex ci;
        int actioni;
        ContributionIndex ci_rr_dominator = ContributionIndex__none;
        int shift_precedence = token->prec;

        /* If the token has no precedence set, shift is always chosen.  */
        if (!shift_precedence)
          return ci_shift;

        /* Figure out which reductions contribute, which of those would
           dominate in an R/R comparison, and whether any reduction
           dominates the shift so that the R/R comparison is actually
           needed.  */
        for (ci = 0, actioni = bitset_first (self->inadequacyNode->inadequacy
                                             .conflict.actions);
             ci < self->inadequacyNode->contributionCount;
             ++ci, actioni = bitset_next (self->inadequacyNode->inadequacy
                                          .conflict.actions, actioni+1))
          {
            int reduce_precedence = 0;
            if (ci == ci_shift)
              continue;
            {
              rule *r = self->inadequacyNode->manifestingState
                          ->reductions->rules[actioni];
              if (r->prec)
                reduce_precedence = r->prec->prec;
            }
            /* If there's no need to check whether this reduction actually
               contributes because the shift eliminates it from the R/R
               comparison anyway, continue to the next reduction.  */
            if (reduce_precedence
                && (reduce_precedence < shift_precedence
                    || (reduce_precedence == shift_precedence
                        && token->assoc == right_assoc)))
              continue;
            if (!AnnotationList__stateMakesContribution (self, nitems, ci,
                                                         lookaheads))
              continue;
            /* This uneliminated reduction contributes, so see if it can
               cause an error action.  */
            if (reduce_precedence == shift_precedence
                && token->assoc == non_assoc)
              {
                /* It's not possible to find split-stable domination over
                   shift after a potential %nonassoc.  */
                if (find_stable_domination_over_shift)
                  return ContributionIndex__none;
                if (!require_split_stable
                    || AnnotationList__isContributionAlways (self, ci))
                  return ContributionIndex__error_action;
                find_stable_error_action_domination = true;
              }
            /* Consider this uneliminated contributing reduction in the R/R
               comparison.  */
            if (ci_rr_dominator == ContributionIndex__none)
              ci_rr_dominator = ci;
            /* If precedence is set for this uneliminated contributing
               reduction, it dominates the shift, so try to figure out which
               reduction dominates the R/R comparison.  */
            if (reduce_precedence)
              {
                /* It's not possible to find split-stable error action
                   domination after a potential reduction.  */
                if (find_stable_error_action_domination)
                  return ContributionIndex__none;
                if (!require_split_stable)
                  return ci_rr_dominator;
                if (!AnnotationList__isContributionAlways (self,
                                                           ci_rr_dominator))
                  return ContributionIndex__none;
                if (AnnotationList__isContributionAlways (self, ci))
                  return ci_rr_dominator;
                find_stable_domination_over_shift = true;
              }
          }
      }
      if (find_stable_domination_over_shift
          || find_stable_error_action_domination)
        return ContributionIndex__none;
      /* No reduce or error action domination found, so shift dominates.  */
      return ci_shift;
    }

  /* R/R conflict, so the reduction with the lowest rule number dominates.
     Fortunately, contributions are sorted by rule number.  */
  {
    ContributionIndex ci;
    for (ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
      if (AnnotationList__stateMakesContribution (self, nitems, ci,
                                                  lookaheads))
        {
          if (require_split_stable
              && !AnnotationList__isContributionAlways (self, ci))
            return ContributionIndex__none;
          return ci;
        }
  }
  return ContributionIndex__none;
}