2 *******************************************************************************
3 * Copyright (C) 2006-2012, International Business Machines Corporation
4 * and others. All Rights Reserved.
5 *******************************************************************************
#include "unicode/utypes.h"

#if !UCONFIG_NO_BREAK_ITERATION

#include "brkeng.h"
#include "dictbe.h"
#include "unicode/uniset.h"
#include "unicode/chariter.h"
#include "unicode/ubrk.h"
#include "uvector.h"
#include "uassert.h"
#include "unicode/normlzr.h"
#include "cmemory.h"
#include "dictionarydata.h"
26 ******************************************************************
29 DictionaryBreakEngine::DictionaryBreakEngine(uint32_t breakTypes
) {
33 DictionaryBreakEngine::~DictionaryBreakEngine() {
37 DictionaryBreakEngine::handles(UChar32 c
, int32_t breakType
) const {
38 return (breakType
>= 0 && breakType
< 32 && (((uint32_t)1 << breakType
) & fTypes
)
43 DictionaryBreakEngine::findBreaks( UText
*text
,
48 UStack
&foundBreaks
) const {
51 // Find the span of characters included in the set.
52 int32_t start
= (int32_t)utext_getNativeIndex(text
);
56 UChar32 c
= utext_current32(text
);
58 UBool isDict
= fSet
.contains(c
);
59 while((current
= (int32_t)utext_getNativeIndex(text
)) > startPos
&& isDict
) {
60 c
= utext_previous32(text
);
61 isDict
= fSet
.contains(c
);
63 rangeStart
= (current
< startPos
) ? startPos
: current
+(isDict
? 0 : 1);
67 while((current
= (int32_t)utext_getNativeIndex(text
)) < endPos
&& fSet
.contains(c
)) {
68 utext_next32(text
); // TODO: recast loop for postincrement
69 c
= utext_current32(text
);
74 if (breakType
>= 0 && breakType
< 32 && (((uint32_t)1 << breakType
) & fTypes
)) {
75 result
= divideUpDictionaryRange(text
, rangeStart
, rangeEnd
, foundBreaks
);
76 utext_setNativeIndex(text
, current
);
83 DictionaryBreakEngine::setCharacters( const UnicodeSet
&set
) {
85 // Compact for caching
90 ******************************************************************
94 // Helper class for improving readability of the Thai word break
95 // algorithm. The implementation is completely inline.
97 // List size, limited by the maximum number of words in the dictionary
98 // that form a nested sequence.
99 #define POSSIBLE_WORD_LIST_MAX 20
103 // list of word candidate lengths, in increasing length order
104 int32_t lengths
[POSSIBLE_WORD_LIST_MAX
];
105 int32_t count
; // Count of candidates
106 int32_t prefix
; // The longest match with a dictionary word
107 int32_t offset
; // Offset in the text of these candidates
108 int mark
; // The preferred candidate's offset
109 int current
; // The candidate we're currently looking at
115 // Fill the list of candidates if needed, select the longest, and return the number found
116 int candidates( UText
*text
, DictionaryMatcher
*dict
, int32_t rangeEnd
);
118 // Select the currently marked candidate, point after it in the text, and invalidate self
119 int32_t acceptMarked( UText
*text
);
121 // Back up from the current candidate to the next shorter one; return TRUE if that exists
122 // and point the text after it
123 UBool
backUp( UText
*text
);
125 // Return the longest prefix this candidate location shares with a dictionary word
126 int32_t longestPrefix();
128 // Mark the current candidate as the one we like
133 PossibleWord::PossibleWord() {
138 PossibleWord::~PossibleWord() {
142 PossibleWord::candidates( UText
*text
, DictionaryMatcher
*dict
, int32_t rangeEnd
) {
143 // TODO: If getIndex is too slow, use offset < 0 and add discardAll()
144 int32_t start
= (int32_t)utext_getNativeIndex(text
);
145 if (start
!= offset
) {
147 prefix
= dict
->matches(text
, rangeEnd
-start
, lengths
, count
, sizeof(lengths
)/sizeof(lengths
[0]));
148 // Dictionary leaves text after longest prefix, not longest word. Back up.
150 utext_setNativeIndex(text
, start
);
154 utext_setNativeIndex(text
, start
+lengths
[count
-1]);
162 PossibleWord::acceptMarked( UText
*text
) {
163 utext_setNativeIndex(text
, offset
+ lengths
[mark
]);
164 return lengths
[mark
];
168 PossibleWord::backUp( UText
*text
) {
170 utext_setNativeIndex(text
, offset
+ lengths
[--current
]);
177 PossibleWord::longestPrefix() {
182 PossibleWord::markCurrent() {
186 // How many words in a row are "good enough"?
187 #define THAI_LOOKAHEAD 3
189 // Will not combine a non-word with a preceding dictionary word longer than this
190 #define THAI_ROOT_COMBINE_THRESHOLD 3
192 // Will not combine a non-word that shares at least this much prefix with a
193 // dictionary word, with a preceding word
194 #define THAI_PREFIX_COMBINE_THRESHOLD 3
// Elision character
197 #define THAI_PAIYANNOI 0x0E2F
200 #define THAI_MAIYAMOK 0x0E46
203 #define THAI_MIN_WORD 2
205 // Minimum number of characters for two words
206 #define THAI_MIN_WORD_SPAN (THAI_MIN_WORD * 2)
208 ThaiBreakEngine::ThaiBreakEngine(DictionaryMatcher
*adoptDictionary
, UErrorCode
&status
)
209 : DictionaryBreakEngine((1<<UBRK_WORD
) | (1<<UBRK_LINE
)),
210 fDictionary(adoptDictionary
)
212 fThaiWordSet
.applyPattern(UNICODE_STRING_SIMPLE("[[:Thai:]&[:LineBreak=SA:]]"), status
);
213 if (U_SUCCESS(status
)) {
214 setCharacters(fThaiWordSet
);
216 fMarkSet
.applyPattern(UNICODE_STRING_SIMPLE("[[:Thai:]&[:LineBreak=SA:]&[:M:]]"), status
);
217 fMarkSet
.add(0x0020);
218 fEndWordSet
= fThaiWordSet
;
219 fEndWordSet
.remove(0x0E31); // MAI HAN-AKAT
220 fEndWordSet
.remove(0x0E40, 0x0E44); // SARA E through SARA AI MAIMALAI
221 fBeginWordSet
.add(0x0E01, 0x0E2E); // KO KAI through HO NOKHUK
222 fBeginWordSet
.add(0x0E40, 0x0E44); // SARA E through SARA AI MAIMALAI
223 fSuffixSet
.add(THAI_PAIYANNOI
);
224 fSuffixSet
.add(THAI_MAIYAMOK
);
226 // Compact for caching.
228 fEndWordSet
.compact();
229 fBeginWordSet
.compact();
230 fSuffixSet
.compact();
233 ThaiBreakEngine::~ThaiBreakEngine() {
238 ThaiBreakEngine::divideUpDictionaryRange( UText
*text
,
241 UStack
&foundBreaks
) const {
242 if ((rangeEnd
- rangeStart
) < THAI_MIN_WORD_SPAN
) {
243 return 0; // Not enough characters for two words
246 uint32_t wordsFound
= 0;
249 UErrorCode status
= U_ZERO_ERROR
;
250 PossibleWord words
[THAI_LOOKAHEAD
];
253 utext_setNativeIndex(text
, rangeStart
);
255 while (U_SUCCESS(status
) && (current
= (int32_t)utext_getNativeIndex(text
)) < rangeEnd
) {
258 // Look for candidate words at the current position
259 int candidates
= words
[wordsFound%THAI_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
);
261 // If we found exactly one, use that
262 if (candidates
== 1) {
263 wordLength
= words
[wordsFound
% THAI_LOOKAHEAD
].acceptMarked(text
);
266 // If there was more than one, see which one can take us forward the most words
267 else if (candidates
> 1) {
268 // If we're already at the end of the range, we're done
269 if ((int32_t)utext_getNativeIndex(text
) >= rangeEnd
) {
273 int wordsMatched
= 1;
274 if (words
[(wordsFound
+ 1) % THAI_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
) > 0) {
275 if (wordsMatched
< 2) {
276 // Followed by another dictionary word; mark first word as a good candidate
277 words
[wordsFound%THAI_LOOKAHEAD
].markCurrent();
281 // If we're already at the end of the range, we're done
282 if ((int32_t)utext_getNativeIndex(text
) >= rangeEnd
) {
286 // See if any of the possible second words is followed by a third word
288 // If we find a third word, stop right away
289 if (words
[(wordsFound
+ 2) % THAI_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
)) {
290 words
[wordsFound
% THAI_LOOKAHEAD
].markCurrent();
294 while (words
[(wordsFound
+ 1) % THAI_LOOKAHEAD
].backUp(text
));
297 while (words
[wordsFound
% THAI_LOOKAHEAD
].backUp(text
));
299 wordLength
= words
[wordsFound
% THAI_LOOKAHEAD
].acceptMarked(text
);
303 // We come here after having either found a word or not. We look ahead to the
304 // next word. If it's not a dictionary word, we will combine it withe the word we
305 // just found (if there is one), but only if the preceding word does not exceed
307 // The text iterator should now be positioned at the end of the word we found.
308 if ((int32_t)utext_getNativeIndex(text
) < rangeEnd
&& wordLength
< THAI_ROOT_COMBINE_THRESHOLD
) {
309 // if it is a dictionary word, do nothing. If it isn't, then if there is
310 // no preceding word, or the non-word shares less than the minimum threshold
311 // of characters with a dictionary word, then scan to resynchronize
312 if (words
[wordsFound
% THAI_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
) <= 0
314 || words
[wordsFound%THAI_LOOKAHEAD
].longestPrefix() < THAI_PREFIX_COMBINE_THRESHOLD
)) {
315 // Look for a plausible word boundary
316 //TODO: This section will need a rework for UText.
317 int32_t remaining
= rangeEnd
- (current
+wordLength
);
318 UChar32 pc
= utext_current32(text
);
322 uc
= utext_current32(text
);
323 // TODO: Here we're counting on the fact that the SA languages are all
324 // in the BMP. This should get fixed with the UText rework.
326 if (--remaining
<= 0) {
329 if (fEndWordSet
.contains(pc
) && fBeginWordSet
.contains(uc
)) {
330 // Maybe. See if it's in the dictionary.
331 // NOTE: In the original Apple code, checked that the next
332 // two characters after uc were not 0x0E4C THANTHAKHAT before
333 // checking the dictionary. That is just a performance filter,
334 // but it's not clear it's faster than checking the trie.
335 int candidates
= words
[(wordsFound
+ 1) % THAI_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
);
336 utext_setNativeIndex(text
, current
+ wordLength
+ chars
);
337 if (candidates
> 0) {
344 // Bump the word count if there wasn't already one
345 if (wordLength
<= 0) {
349 // Update the length with the passed-over characters
353 // Back up to where we were for next iteration
354 utext_setNativeIndex(text
, current
+wordLength
);
358 // Never stop before a combining mark.
360 while ((currPos
= (int32_t)utext_getNativeIndex(text
)) < rangeEnd
&& fMarkSet
.contains(utext_current32(text
))) {
362 wordLength
+= (int32_t)utext_getNativeIndex(text
) - currPos
;
365 // Look ahead for possible suffixes if a dictionary word does not follow.
366 // We do this in code rather than using a rule so that the heuristic
367 // resynch continues to function. For example, one of the suffix characters
368 // could be a typo in the middle of a word.
369 if ((int32_t)utext_getNativeIndex(text
) < rangeEnd
&& wordLength
> 0) {
370 if (words
[wordsFound%THAI_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
) <= 0
371 && fSuffixSet
.contains(uc
= utext_current32(text
))) {
372 if (uc
== THAI_PAIYANNOI
) {
373 if (!fSuffixSet
.contains(utext_previous32(text
))) {
374 // Skip over previous end and PAIYANNOI
377 wordLength
+= 1; // Add PAIYANNOI to word
378 uc
= utext_current32(text
); // Fetch next character
381 // Restore prior position
385 if (uc
== THAI_MAIYAMOK
) {
386 if (utext_previous32(text
) != THAI_MAIYAMOK
) {
387 // Skip over previous end and MAIYAMOK
390 wordLength
+= 1; // Add MAIYAMOK to word
393 // Restore prior position
399 utext_setNativeIndex(text
, current
+wordLength
);
403 // Did we find a word on this iteration? If so, push it on the break stack
404 if (wordLength
> 0) {
405 foundBreaks
.push((current
+wordLength
), status
);
409 // Don't return a break for the end of the dictionary range if there is one there.
410 if (foundBreaks
.peeki() >= rangeEnd
) {
411 (void) foundBreaks
.popi();
418 // How many words in a row are "good enough"?
419 #define KHMER_LOOKAHEAD 3
421 // Will not combine a non-word with a preceding dictionary word longer than this
422 #define KHMER_ROOT_COMBINE_THRESHOLD 3
424 // Will not combine a non-word that shares at least this much prefix with a
425 // dictionary word, with a preceding word
426 #define KHMER_PREFIX_COMBINE_THRESHOLD 3
429 #define KHMER_MIN_WORD 2
431 // Minimum number of characters for two words
432 #define KHMER_MIN_WORD_SPAN (KHMER_MIN_WORD * 2)
434 KhmerBreakEngine::KhmerBreakEngine(DictionaryMatcher
*adoptDictionary
, UErrorCode
&status
)
435 : DictionaryBreakEngine((1 << UBRK_WORD
) | (1 << UBRK_LINE
)),
436 fDictionary(adoptDictionary
)
438 fKhmerWordSet
.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]&[:LineBreak=SA:]]"), status
);
439 if (U_SUCCESS(status
)) {
440 setCharacters(fKhmerWordSet
);
442 fMarkSet
.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]&[:LineBreak=SA:]&[:M:]]"), status
);
443 fMarkSet
.add(0x0020);
444 fEndWordSet
= fKhmerWordSet
;
445 fBeginWordSet
.add(0x1780, 0x17B3);
446 //fBeginWordSet.add(0x17A3, 0x17A4); // deprecated vowels
447 //fEndWordSet.remove(0x17A5, 0x17A9); // Khmer independent vowels that can't end a word
448 //fEndWordSet.remove(0x17B2); // Khmer independent vowel that can't end a word
449 fEndWordSet
.remove(0x17D2); // KHMER SIGN COENG that combines some following characters
450 //fEndWordSet.remove(0x17B6, 0x17C5); // Remove dependent vowels
451 // fEndWordSet.remove(0x0E31); // MAI HAN-AKAT
452 // fEndWordSet.remove(0x0E40, 0x0E44); // SARA E through SARA AI MAIMALAI
453 // fBeginWordSet.add(0x0E01, 0x0E2E); // KO KAI through HO NOKHUK
454 // fBeginWordSet.add(0x0E40, 0x0E44); // SARA E through SARA AI MAIMALAI
455 // fSuffixSet.add(THAI_PAIYANNOI);
456 // fSuffixSet.add(THAI_MAIYAMOK);
458 // Compact for caching.
460 fEndWordSet
.compact();
461 fBeginWordSet
.compact();
462 // fSuffixSet.compact();
465 KhmerBreakEngine::~KhmerBreakEngine() {
470 KhmerBreakEngine::divideUpDictionaryRange( UText
*text
,
473 UStack
&foundBreaks
) const {
474 if ((rangeEnd
- rangeStart
) < KHMER_MIN_WORD_SPAN
) {
475 return 0; // Not enough characters for two words
478 uint32_t wordsFound
= 0;
481 UErrorCode status
= U_ZERO_ERROR
;
482 PossibleWord words
[KHMER_LOOKAHEAD
];
485 utext_setNativeIndex(text
, rangeStart
);
487 while (U_SUCCESS(status
) && (current
= (int32_t)utext_getNativeIndex(text
)) < rangeEnd
) {
490 // Look for candidate words at the current position
491 int candidates
= words
[wordsFound%KHMER_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
);
493 // If we found exactly one, use that
494 if (candidates
== 1) {
495 wordLength
= words
[wordsFound%KHMER_LOOKAHEAD
].acceptMarked(text
);
499 // If there was more than one, see which one can take us forward the most words
500 else if (candidates
> 1) {
501 // If we're already at the end of the range, we're done
502 if ((int32_t)utext_getNativeIndex(text
) >= rangeEnd
) {
506 int wordsMatched
= 1;
507 if (words
[(wordsFound
+ 1) % KHMER_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
) > 0) {
508 if (wordsMatched
< 2) {
509 // Followed by another dictionary word; mark first word as a good candidate
510 words
[wordsFound
% KHMER_LOOKAHEAD
].markCurrent();
514 // If we're already at the end of the range, we're done
515 if ((int32_t)utext_getNativeIndex(text
) >= rangeEnd
) {
519 // See if any of the possible second words is followed by a third word
521 // If we find a third word, stop right away
522 if (words
[(wordsFound
+ 2) % KHMER_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
)) {
523 words
[wordsFound
% KHMER_LOOKAHEAD
].markCurrent();
527 while (words
[(wordsFound
+ 1) % KHMER_LOOKAHEAD
].backUp(text
));
530 while (words
[wordsFound
% KHMER_LOOKAHEAD
].backUp(text
));
532 wordLength
= words
[wordsFound
% KHMER_LOOKAHEAD
].acceptMarked(text
);
536 // We come here after having either found a word or not. We look ahead to the
537 // next word. If it's not a dictionary word, we will combine it with the word we
538 // just found (if there is one), but only if the preceding word does not exceed
540 // The text iterator should now be positioned at the end of the word we found.
541 if ((int32_t)utext_getNativeIndex(text
) < rangeEnd
&& wordLength
< KHMER_ROOT_COMBINE_THRESHOLD
) {
542 // if it is a dictionary word, do nothing. If it isn't, then if there is
543 // no preceding word, or the non-word shares less than the minimum threshold
544 // of characters with a dictionary word, then scan to resynchronize
545 if (words
[wordsFound
% KHMER_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
) <= 0
547 || words
[wordsFound
% KHMER_LOOKAHEAD
].longestPrefix() < KHMER_PREFIX_COMBINE_THRESHOLD
)) {
548 // Look for a plausible word boundary
549 //TODO: This section will need a rework for UText.
550 int32_t remaining
= rangeEnd
- (current
+wordLength
);
551 UChar32 pc
= utext_current32(text
);
555 uc
= utext_current32(text
);
556 // TODO: Here we're counting on the fact that the SA languages are all
557 // in the BMP. This should get fixed with the UText rework.
559 if (--remaining
<= 0) {
562 if (fEndWordSet
.contains(pc
) && fBeginWordSet
.contains(uc
)) {
563 // Maybe. See if it's in the dictionary.
564 int candidates
= words
[(wordsFound
+ 1) % KHMER_LOOKAHEAD
].candidates(text
, fDictionary
, rangeEnd
);
565 utext_setNativeIndex(text
, current
+wordLength
+chars
);
566 if (candidates
> 0) {
573 // Bump the word count if there wasn't already one
574 if (wordLength
<= 0) {
578 // Update the length with the passed-over characters
582 // Back up to where we were for next iteration
583 utext_setNativeIndex(text
, current
+wordLength
);
587 // Never stop before a combining mark.
589 while ((currPos
= (int32_t)utext_getNativeIndex(text
)) < rangeEnd
&& fMarkSet
.contains(utext_current32(text
))) {
591 wordLength
+= (int32_t)utext_getNativeIndex(text
) - currPos
;
594 // Look ahead for possible suffixes if a dictionary word does not follow.
595 // We do this in code rather than using a rule so that the heuristic
596 // resynch continues to function. For example, one of the suffix characters
597 // could be a typo in the middle of a word.
598 // if ((int32_t)utext_getNativeIndex(text) < rangeEnd && wordLength > 0) {
599 // if (words[wordsFound%KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd) <= 0
600 // && fSuffixSet.contains(uc = utext_current32(text))) {
601 // if (uc == KHMER_PAIYANNOI) {
602 // if (!fSuffixSet.contains(utext_previous32(text))) {
603 // // Skip over previous end and PAIYANNOI
604 // utext_next32(text);
605 // utext_next32(text);
606 // wordLength += 1; // Add PAIYANNOI to word
607 // uc = utext_current32(text); // Fetch next character
610 // // Restore prior position
611 // utext_next32(text);
614 // if (uc == KHMER_MAIYAMOK) {
615 // if (utext_previous32(text) != KHMER_MAIYAMOK) {
616 // // Skip over previous end and MAIYAMOK
617 // utext_next32(text);
618 // utext_next32(text);
619 // wordLength += 1; // Add MAIYAMOK to word
622 // // Restore prior position
623 // utext_next32(text);
628 // utext_setNativeIndex(text, current+wordLength);
632 // Did we find a word on this iteration? If so, push it on the break stack
633 if (wordLength
> 0) {
634 foundBreaks
.push((current
+wordLength
), status
);
638 // Don't return a break for the end of the dictionary range if there is one there.
639 if (foundBreaks
.peeki() >= rangeEnd
) {
640 (void) foundBreaks
.popi();
647 #if !UCONFIG_NO_NORMALIZATION
649 ******************************************************************
652 static const uint32_t kuint32max
= 0xFFFFFFFF;
653 CjkBreakEngine::CjkBreakEngine(DictionaryMatcher
*adoptDictionary
, LanguageType type
, UErrorCode
&status
)
654 : DictionaryBreakEngine(1 << UBRK_WORD
), fDictionary(adoptDictionary
) {
655 // Korean dictionary only includes Hangul syllables
656 fHangulWordSet
.applyPattern(UNICODE_STRING_SIMPLE("[\\uac00-\\ud7a3]"), status
);
657 fHanWordSet
.applyPattern(UNICODE_STRING_SIMPLE("[:Han:]"), status
);
658 fKatakanaWordSet
.applyPattern(UNICODE_STRING_SIMPLE("[[:Katakana:]\\uff9e\\uff9f]"), status
);
659 fHiraganaWordSet
.applyPattern(UNICODE_STRING_SIMPLE("[:Hiragana:]"), status
);
661 if (U_SUCCESS(status
)) {
662 // handle Korean and Japanese/Chinese using different dictionaries
663 if (type
== kKorean
) {
664 setCharacters(fHangulWordSet
);
665 } else { //Chinese and Japanese
667 cjSet
.addAll(fHanWordSet
);
668 cjSet
.addAll(fKatakanaWordSet
);
669 cjSet
.addAll(fHiraganaWordSet
);
672 setCharacters(cjSet
);
677 CjkBreakEngine::~CjkBreakEngine(){
// The katakanaCost values below are based on the length frequencies of all
// katakana phrases in the dictionary
static const int kMaxKatakanaLength = 8;
static const int kMaxKatakanaGroupLength = 20;
static const uint32_t maxSnlp = 255;

// Cost (negative-log-probability style penalty) assigned to a katakana run
// of the given length; runs longer than kMaxKatakanaLength get the maximum
// (single-character) cost of 8192.
static inline uint32_t getKatakanaCost(int wordLength){
    //TODO: fill array with actual values from dictionary!
    static const uint32_t katakanaCost[kMaxKatakanaLength + 1]
                          = {8192, 984, 408, 240, 204, 252, 300, 372, 480};
    return (wordLength > kMaxKatakanaLength) ? 8192 : katakanaCost[wordLength];
}
// True for BMP code units that count as katakana for segmentation:
// the katakana block U+30A1..U+30FE excluding the middle dot U+30FB,
// plus the halfwidth katakana forms U+FF66..U+FF9F.
static inline bool isKatakana(uint16_t value) {
    return (value >= 0x30A1u && value <= 0x30FEu && value != 0x30FBu) ||
           (value >= 0xFF66u && value <= 0xFF9fu);
}
699 // A very simple helper class to streamline the buffer handling in
700 // divideUpDictionaryRange.
701 template<class T
, size_t N
>
704 AutoBuffer(size_t size
) : buffer(stackBuffer
), capacity(N
) {
706 buffer
= reinterpret_cast<T
*>(uprv_malloc(sizeof(T
)*size
));
711 if (buffer
!= stackBuffer
)
719 const T
& operator[] (size_t i
) const {
723 T
& operator[] (size_t i
) {
727 // resize without copy
728 void resize(size_t size
) {
729 if (size
<= capacity
)
731 if (buffer
!= stackBuffer
)
733 buffer
= reinterpret_cast<T
*>(uprv_malloc(sizeof(T
)*size
));
746 * @param text A UText representing the text
747 * @param rangeStart The start of the range of dictionary characters
748 * @param rangeEnd The end of the range of dictionary characters
749 * @param foundBreaks Output of C array of int32_t break positions, or 0
750 * @return The number of breaks found
753 CjkBreakEngine::divideUpDictionaryRange( UText
*text
,
756 UStack
&foundBreaks
) const {
757 if (rangeStart
>= rangeEnd
) {
761 const size_t defaultInputLength
= 80;
762 size_t inputLength
= rangeEnd
- rangeStart
;
763 // TODO: Replace by UnicodeString.
764 AutoBuffer
<UChar
, defaultInputLength
> charString(inputLength
);
766 // Normalize the input string and put it in normalizedText.
767 // The map from the indices of the normalized input to the raw
768 // input is kept in charPositions.
769 UErrorCode status
= U_ZERO_ERROR
;
770 utext_extract(text
, rangeStart
, rangeEnd
, charString
.elems(), inputLength
, &status
);
771 if (U_FAILURE(status
)) {
775 UnicodeString
inputString(charString
.elems(), inputLength
);
776 // TODO: Use Normalizer2.
777 UNormalizationMode norm_mode
= UNORM_NFKC
;
779 Normalizer::quickCheck(inputString
, norm_mode
, status
) == UNORM_YES
||
780 Normalizer::isNormalized(inputString
, norm_mode
, status
);
782 // TODO: Replace by UVector32.
783 AutoBuffer
<int32_t, defaultInputLength
> charPositions(inputLength
+ 1);
785 UText normalizedText
= UTEXT_INITIALIZER
;
786 // Needs to be declared here because normalizedText holds onto its buffer.
787 UnicodeString normalizedString
;
790 charPositions
[0] = 0;
791 while(index
< inputString
.length()) {
792 index
= inputString
.moveIndex32(index
, 1);
793 charPositions
[++numChars
] = index
;
795 utext_openUnicodeString(&normalizedText
, &inputString
, &status
);
798 Normalizer::normalize(inputString
, norm_mode
, 0, normalizedString
, status
);
799 if (U_FAILURE(status
)) {
802 charPositions
.resize(normalizedString
.length() + 1);
803 Normalizer
normalizer(charString
.elems(), inputLength
, norm_mode
);
805 charPositions
[0] = 0;
806 while(index
< normalizer
.endIndex()){
807 /* UChar32 uc = */ normalizer
.next();
808 charPositions
[++numChars
] = index
= normalizer
.getIndex();
810 utext_openUnicodeString(&normalizedText
, &normalizedString
, &status
);
813 if (U_FAILURE(status
)) {
817 // From this point on, all the indices refer to the indices of
818 // the normalized input string.
820 // bestSnlp[i] is the snlp of the best segmentation of the first i
821 // characters in the range to be matched.
822 // TODO: Replace by UVector32.
823 AutoBuffer
<uint32_t, defaultInputLength
> bestSnlp(numChars
+ 1);
825 for(int i
= 1; i
<= numChars
; i
++) {
826 bestSnlp
[i
] = kuint32max
;
829 // prev[i] is the index of the last CJK character in the previous word in
830 // the best segmentation of the first i characters.
831 // TODO: Replace by UVector32.
832 AutoBuffer
<int, defaultInputLength
> prev(numChars
+ 1);
833 for(int i
= 0; i
<= numChars
; i
++){
837 const size_t maxWordSize
= 20;
838 // TODO: Replace both with UVector32.
839 AutoBuffer
<int32_t, maxWordSize
> values(numChars
);
840 AutoBuffer
<int32_t, maxWordSize
> lengths(numChars
);
842 // Dynamic programming to find the best segmentation.
843 bool is_prev_katakana
= false;
844 for (int32_t i
= 0; i
< numChars
; ++i
) {
845 //utext_setNativeIndex(text, rangeStart + i);
846 utext_setNativeIndex(&normalizedText
, i
);
847 if (bestSnlp
[i
] == kuint32max
)
851 // limit maximum word length matched to size of current substring
852 int32_t maxSearchLength
= (i
+ maxWordSize
< (size_t) numChars
)? maxWordSize
: (numChars
- i
);
854 fDictionary
->matches(&normalizedText
, maxSearchLength
, lengths
.elems(), count
, maxSearchLength
, values
.elems());
856 // if there are no single character matches found in the dictionary
857 // starting with this charcter, treat character as a 1-character word
858 // with the highest value possible, i.e. the least likely to occur.
859 // Exclude Korean characters from this treatment, as they should be left
860 // together by default.
861 if((count
== 0 || lengths
[0] != 1) &&
862 !fHangulWordSet
.contains(utext_current32(&normalizedText
))) {
863 values
[count
] = maxSnlp
;
864 lengths
[count
++] = 1;
867 for (int j
= 0; j
< count
; j
++) {
868 uint32_t newSnlp
= bestSnlp
[i
] + values
[j
];
869 if (newSnlp
< bestSnlp
[lengths
[j
] + i
]) {
870 bestSnlp
[lengths
[j
] + i
] = newSnlp
;
871 prev
[lengths
[j
] + i
] = i
;
876 // Katakana word in single character is pretty rare. So we apply
877 // the following heuristic to Katakana: any continuous run of Katakana
878 // characters is considered a candidate word with a default cost
879 // specified in the katakanaCost table according to its length.
880 //utext_setNativeIndex(text, rangeStart + i);
881 utext_setNativeIndex(&normalizedText
, i
);
882 bool is_katakana
= isKatakana(utext_current32(&normalizedText
));
883 if (!is_prev_katakana
&& is_katakana
) {
885 utext_next32(&normalizedText
);
886 // Find the end of the continuous run of Katakana characters
887 while (j
< numChars
&& (j
- i
) < kMaxKatakanaGroupLength
&&
888 isKatakana(utext_current32(&normalizedText
))) {
889 utext_next32(&normalizedText
);
892 if ((j
- i
) < kMaxKatakanaGroupLength
) {
893 uint32_t newSnlp
= bestSnlp
[i
] + getKatakanaCost(j
- i
);
894 if (newSnlp
< bestSnlp
[j
]) {
895 bestSnlp
[j
] = newSnlp
;
900 is_prev_katakana
= is_katakana
;
903 // Start pushing the optimal offset index into t_boundary (t for tentative).
904 // prev[numChars] is guaranteed to be meaningful.
905 // We'll first push in the reverse order, i.e.,
906 // t_boundary[0] = numChars, and afterwards do a swap.
907 // TODO: Replace by UVector32.
908 AutoBuffer
<int, maxWordSize
> t_boundary(numChars
+ 1);
911 // No segmentation found, set boundary to end of range
912 if (bestSnlp
[numChars
] == kuint32max
) {
913 t_boundary
[numBreaks
++] = numChars
;
915 for (int i
= numChars
; i
> 0; i
= prev
[i
]) {
916 t_boundary
[numBreaks
++] = i
;
918 U_ASSERT(prev
[t_boundary
[numBreaks
- 1]] == 0);
921 // Reverse offset index in t_boundary.
922 // Don't add a break for the start of the dictionary range if there is one
924 if (foundBreaks
.size() == 0 || foundBreaks
.peeki() < rangeStart
) {
925 t_boundary
[numBreaks
++] = 0;
928 // Now that we're done, convert positions in t_bdry[] (indices in
929 // the normalized input string) back to indices in the raw input string
930 // while reversing t_bdry and pushing values to foundBreaks.
931 for (int i
= numBreaks
-1; i
>= 0; i
--) {
932 foundBreaks
.push(charPositions
[t_boundary
[i
]] + rangeStart
, status
);
935 utext_close(&normalizedText
);
942 #endif /* #if !UCONFIG_NO_BREAK_ITERATION */