/***************************************************************************/
/*                                                                         */
/*  t1tokens.c                                                             */
/*                                                                         */
/*    Type 1 tokenizer (body).                                             */
/*                                                                         */
/*  Copyright 1996-2000 by                                                 */
/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */
/*                                                                         */
/*  This file is part of the FreeType project, and may only be used,       */
/*  modified, and distributed under the terms of the FreeType project      */
/*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */
/*  this file you indicate that you have read the license and              */
/*  understand and accept it fully.                                        */
/*                                                                         */
/***************************************************************************/


  /*************************************************************************/
  /*                                                                       */
  /*  The tokenizer is in charge of loading and reading a Type 1 font      */
  /*  file (either in PFB or PFA format), and of extracting successive     */
  /*  tokens and keywords from its two streams (i.e., the font program     */
  /*  and the private dictionary).                                         */
  /*                                                                       */
  /*  Eexec decryption is performed automatically when entering the        */
  /*  private dictionary, or when retrieving char strings.                 */
  /*                                                                       */
  /*************************************************************************/



#include <freetype/internal/ftstream.h>
#include <freetype/internal/ftdebug.h>


#ifdef FT_FLAT_COMPILE

#include "t1tokens.h"
#include "t1load.h"

#else

#include <type1/t1tokens.h>
#include <type1/t1load.h>

#endif


#include <string.h>     /* for strncmp() */


#undef  READ_BUFFER_INCREMENT
#define READ_BUFFER_INCREMENT  0x400
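
/* the read buffer is grown in chunks of READ_BUFFER_INCREMENT bytes */
/* (0x400 = 1 KByte); see grow() below                               */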


  /*************************************************************************/
  /*                                                                       */
  /*  The macro FT_COMPONENT is used in trace mode.  It is an implicit     */
  /*  parameter of the FT_TRACE() and FT_ERROR() macros, used to print/log */
  /*  messages during execution.                                           */
  /*                                                                       */
#undef  FT_COMPONENT
#define FT_COMPONENT  trace_t1load


  /* An array of the Type 1 keywords supported by this engine.  The  */
  /* table lists the keywords in lexicographical order; it must stay */
  /* in sync with the `key_xxx' enumeration!                         */
  /*                                                                 */
  const char*  t1_keywords[key_max - key_first_] =
  {
    "-|", "ExpertEncoding", "ND", "NP", "RD", "StandardEncoding", "array",
    "begin", "closefile", "currentdict", "currentfile", "def", "dict", "dup",
    "eexec", "end", "executeonly", "false", "for", "index", "noaccess",
    "put", "readonly", "true", "userdict", "|", "|-"
  };


  const char*  t1_immediates[imm_max - imm_first_] =
  {
    "-|", ".notdef", "BlendAxisTypes", "BlueFuzz", "BlueScale", "BlueShift",
    "BlueValues", "CharStrings", "Encoding", "FamilyBlues", "FamilyName",
    "FamilyOtherBlues", "FID", "FontBBox", "FontID", "FontInfo", "FontMatrix",
    "FontName", "FontType", "ForceBold", "FullName", "ItalicAngle",
    "LanguageGroup", "Metrics", "MinFeature", "ND", "NP", "Notice",
    "OtherBlues", "OtherSubrs", "PaintType", "Private", "RD", "RndStemUp",
    "StdHW", "StdVW", "StemSnapH", "StemSnapV", "StrokeWidth", "Subrs",
    "UnderlinePosition", "UnderlineThickness", "UniqueID", "Weight",
    "isFixedPitch", "lenIV", "password", "version", "|", "|-"
  };


  /* lexicographic comparison of two strings */
  static
  int  lexico_strcmp( const char*  str1,
                      int          str1_len,
                      const char*  str2 )
  {
    int  c2 = 0;


    for ( ; str1_len > 0; str1_len-- )
    {
      int  c1, diff;


      c1 = *str1++;
      c2 = *str2++;

      diff = c1 - c2;
      if ( diff )
        return diff;
    }

    return -*str2;
  }


  /* find a given token/name, performing binary search */
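  /* note: `table' must be sorted in the lexicographical order       */
  /* defined by lexico_strcmp(); both t1_keywords[] and              */
  /* t1_immediates[] above are stored this way                       */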
  static
  int  Find_Name( char*         base,
                  int           length,
                  const char**  table,
                  int           table_len )
  {
    int  left, right;


    left  = 0;
    right = table_len - 1;

    while ( right - left > 1 )
    {
      int  middle = left + ( ( right - left ) >> 1 );
      int  cmp;


      cmp = lexico_strcmp( base, length, table[middle] );
      if ( !cmp )
        return middle;

      if ( cmp < 0 )
        right = middle;
      else
        left = middle;
    }

    if ( !lexico_strcmp( base, length, table[left] ) )
      return left;
    if ( !lexico_strcmp( base, length, table[right] ) )
      return right;

    return -1;
  }


  /* read the small PFB section header */
  static
  FT_Error  Read_PFB_Tag( FT_Stream   stream,
                          FT_UShort*  atag,
                          FT_ULong*   asize )
  {
    FT_UShort  tag;
    FT_ULong   size;
    FT_Error   error;


    FT_TRACE2(( "Read_PFB_Tag: reading\n" ));

    if ( ACCESS_Frame( 6L ) )
      return error;

    tag  = GET_UShort();
    size = GET_ULong();

    FORGET_Frame();

    *atag = tag;
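
    /* the PFB segment size is stored in little-endian order, but    */
    /* GET_ULong() reads big-endian; swap the four bytes accordingly */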
    *asize = ( (   size         & 0xFF ) << 24 ) |
             ( ( ( size >> 8  ) & 0xFF ) << 16 ) |
             ( ( ( size >> 16 ) & 0xFF ) << 8  ) |
             ( ( ( size >> 24 ) & 0xFF )       );

    FT_TRACE2(( "  tag      = %04x\n", tag ));
    FT_TRACE4(( "  raw size = %08lx\n", size ));
    FT_TRACE2(( "  size     = %08lx\n", *asize ));

    return T1_Err_Ok;
  }


  static
  FT_Error  grow( T1_Tokenizer  tokzer )
  {
    FT_Error   error;
    FT_Long    left_bytes;
    FT_Memory  memory = tokzer->memory;


    left_bytes = tokzer->max - tokzer->limit;

    if ( left_bytes > 0 )
    {
      FT_Stream  stream = tokzer->stream;


      if ( left_bytes > READ_BUFFER_INCREMENT )
        left_bytes = READ_BUFFER_INCREMENT;

      FT_TRACE2(( "Growing tokenizer buffer by %ld bytes\n", left_bytes ));

      if ( !REALLOC( tokzer->base, tokzer->limit,
                     tokzer->limit + left_bytes )                &&
           !FILE_Read( tokzer->base + tokzer->limit, left_bytes ) )
        tokzer->limit += left_bytes;
    }
    else
    {
      FT_ERROR(( "Unexpected end of Type1 fragment!\n" ));
      error = T1_Err_Invalid_File_Format;
    }

    tokzer->error = error;
    return error;
  }


  /*************************************************************************/
  /*                                                                       */
  /* <Function>                                                            */
  /*    t1_decrypt                                                         */
  /*                                                                       */
  /* <Description>                                                         */
  /*    Performs the Type 1 charstring decryption process.                 */
  /*                                                                       */
  /* <Input>                                                               */
  /*    buffer :: The base address of the data to decrypt.                 */
  /*                                                                       */
  /*    length :: The number of bytes to decrypt (beginning from the base  */
  /*              address).                                                */
  /*                                                                       */
  /*    seed   :: The encryption seed (4330 for charstrings).              */
  /*                                                                       */
  LOCAL_FUNC
  void  t1_decrypt( FT_Byte*   buffer,
                    FT_Int     length,
                    FT_UShort  seed )
  {
    while ( length > 0 )
    {
      FT_Byte  plain;

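      /* a single step of the Adobe Type 1 decryption scheme:  */
      /*                                                       */
      /*   plain = cipher ^ ( r >> 8 )                         */
      /*   r     = ( cipher + r ) * 52845 + 22719              */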
      plain     = ( *buffer ^ ( seed >> 8 ) );
      seed      = ( *buffer + seed ) * 52845 + 22719;
      *buffer++ = plain;
      length--;
    }
  }
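
  /* note: the same routine serves both eexec data (seed 55665) and  */
  /* individual charstrings (seed 4330); see Open_PrivateDict() and  */
  /* Read_CharStrings() below                                        */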


  /*************************************************************************/
  /*                                                                       */
  /* <Function>                                                            */
  /*    New_Tokenizer                                                      */
  /*                                                                       */
  /* <Description>                                                         */
  /*    Creates a new tokenizer from a given input stream.  This function  */
  /*    automatically recognizes `pfa' or `pfb' files.  The function       */
  /*    Read_Token() can then be used to extract successive tokens from    */
  /*    the stream.                                                        */
  /*                                                                       */
  /* <Input>                                                               */
  /*    stream    :: The input stream.                                     */
  /*                                                                       */
  /* <Output>                                                              */
  /*    tokenizer :: A handle to a new tokenizer object.                   */
  /*                                                                       */
  /* <Return>                                                              */
  /*    FreeType error code.  0 means success.                             */
  /*                                                                       */
  /* <Note>                                                                */
  /*    This function copies the stream handle within the object; callers  */
  /*    must therefore keep `stream' valid until Done_Tokenizer() has      */
  /*    been called on the new object.                                     */
  /*                                                                       */
  LOCAL_FUNC
  FT_Error  New_Tokenizer( FT_Stream      stream,
                           T1_Tokenizer*  tokenizer )
  {
    FT_Memory     memory = stream->memory;
    T1_Tokenizer  tokzer;
    FT_Error      error;
    FT_UShort     tag;
    FT_ULong      size;

    FT_Byte*      tok_base;
    FT_ULong      tok_limit;
    FT_ULong      tok_max;


    *tokenizer = 0;

    /* allocate object */
    if ( FILE_Seek( 0L )                     ||
         ALLOC( tokzer, sizeof ( *tokzer ) ) )
      return error;

    tokzer->stream = stream;
    tokzer->memory = stream->memory;

    tokzer->in_pfb     = 0;
    tokzer->in_private = 0;

    tok_base  = 0;
    tok_limit = 0;
    tok_max   = stream->size;

    error = Read_PFB_Tag( stream, &tag, &size );
    if ( error )
      goto Fail;

    if ( tag != 0x8001 )
    {
      /* assume that it is a PFA file -- an error will be produced later */
      /* if a character with value > 127 is encountered                  */

      /* rewind to the start of the file */
      if ( FILE_Seek( 0L ) )
        goto Fail;

      size = stream->size;
    }
    else
      tokzer->in_pfb = 1;

    /* if it is a memory-based resource, set up the pointers directly */
    if ( !stream->read )
    {
      tok_base  = (FT_Byte*)stream->base + stream->pos;
      tok_limit = size;
      tok_max   = size;

      /* check that the `size' field is valid */
      if ( FILE_Skip( size ) )
        goto Fail;
    }
    else if ( tag == 0x8001 )
    {
      /* read the segment into memory */
      if ( ALLOC( tok_base, size ) )
        goto Fail;

      if ( FILE_Read( tok_base, size ) )
      {
        FREE( tok_base );
        goto Fail;
      }

      tok_limit = size;
      tok_max   = size;
    }

    tokzer->base   = tok_base;
    tokzer->limit  = tok_limit;
    tokzer->max    = tok_max;
    tokzer->cursor = 0;

    *tokenizer = tokzer;

    /* now check the font format; we must see `%!PS-AdobeFont-1' */
    /* or `%!FontType'                                           */
    {
      if ( 16 > tokzer->limit )
        grow( tokzer );

      if ( tokzer->limit <= 16                                              ||
           ( strncmp( (const char*)tokzer->base, "%!PS-AdobeFont-1", 16 ) &&
             strncmp( (const char*)tokzer->base, "%!FontType", 10 )        ) )
      {
        FT_TRACE2(( "[not a Type1 font]\n" ));
        error = FT_Err_Unknown_File_Format;
        goto Fail;
      }
    }
    return T1_Err_Ok;

  Fail:
    /* only free the read buffer if we allocated it ourselves; for  */
    /* memory-based streams, `base' points into the stream's memory */
    if ( !stream->base )
      FREE( tokzer->base );
    FREE( tokzer );
    return error;
  }
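

  /* A minimal usage sketch (error handling elided), assuming a valid */
  /* `stream' handle:                                                 */
  /*                                                                  */
  /*   T1_Tokenizer  tokzer;                                          */
  /*                                                                  */
  /*   if ( !New_Tokenizer( stream, &tokzer ) )                       */
  /*   {                                                              */
  /*     while ( !Read_Token( tokzer )            &&                  */
  /*             tokzer->token.kind2 != key_eexec )                   */
  /*       ...  process font program tokens here  ...                 */
  /*                                                                  */
  /*     Open_PrivateDict( tokzer );  -- then call Read_Token() again */
  /*     Done_Tokenizer( tokzer );                                    */
  /*   }                                                              */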


  /* return the value of a hexadecimal digit, or -1 if invalid */
  static
  int  hexa_value( char  c )
  {
    unsigned int  d;


    d = (unsigned int)( c - '0' );
    if ( d <= 9 )
      return (int)d;

    d = (unsigned int)( c - 'a' );
    if ( d <= 5 )
      return (int)( d + 10 );

    d = (unsigned int)( c - 'A' );
    if ( d <= 5 )
      return (int)( d + 10 );

    return -1;
  }


  /*************************************************************************/
  /*                                                                       */
  /* <Function>                                                            */
  /*    Done_Tokenizer                                                     */
  /*                                                                       */
  /* <Description>                                                         */
  /*    Closes a given tokenizer.  This function also discards the read    */
  /*    buffer owned by the object, but not the underlying stream.         */
  /*                                                                       */
  /* <Input>                                                               */
  /*    tokenizer :: The target tokenizer object.                          */
  /*                                                                       */
  /* <Return>                                                              */
  /*    FreeType error code.  0 means success.                             */
  /*                                                                       */
  LOCAL_FUNC
  FT_Error  Done_Tokenizer( T1_Tokenizer  tokenizer )
  {
    FT_Memory  memory = tokenizer->memory;


    /* free the read buffer if it was allocated by the tokenizer    */
    /* (i.e. for disk-based resources, or once the private          */
    /* dictionary has been opened)                                  */
    if ( tokenizer->in_private || !tokenizer->stream->base )
      FREE( tokenizer->base );

    FREE( tokenizer );
    return T1_Err_Ok;
  }


  /*************************************************************************/
  /*                                                                       */
  /* <Function>                                                            */
  /*    Open_PrivateDict                                                   */
  /*                                                                       */
  /* <Description>                                                         */
  /*    This function must be called to set the tokenizer to the private   */
  /*    section of the Type 1 file.  It automatically recognizes the kind  */
  /*    of eexec encryption used (ASCII or binary).                        */
  /*                                                                       */
  /* <Input>                                                               */
  /*    tokenizer :: The target tokenizer object.                          */
  /*                                                                       */
  /* <Return>                                                              */
  /*    FreeType error code.  0 means success.                             */
  /*                                                                       */
  LOCAL_FUNC
  FT_Error  Open_PrivateDict( T1_Tokenizer  tokenizer )
  {
    T1_Tokenizer  tokzer = tokenizer;
    FT_Stream     stream = tokzer->stream;
    FT_Memory     memory = tokzer->memory;
    FT_Error      error  = 0;

    FT_UShort     tag;
    FT_ULong      size;

    FT_Byte*      private_dict;


    /* are we already in the private dictionary? */
    if ( tokzer->in_private )
      return 0;

    if ( tokzer->in_pfb )
    {
      /* in the case of the PFB format, the private dictionary can be */
      /* made of several segments.  We thus first scan the segment    */
      /* headers to compute the total size of the private dictionary, */
      /* then re-read the segments into memory.                       */
      FT_Long   start_pos         = FILE_Pos();
      FT_ULong  private_dict_size = 0;


      for (;;)
      {
        error = Read_PFB_Tag( stream, &tag, &size );
        if ( error || tag != 0x8002 )
          break;

        private_dict_size += size;

        if ( FILE_Skip( size ) )
          goto Fail;
      }

      /* check that we have a private dictionary there */
      /* and allocate the private dictionary buffer    */
      if ( private_dict_size == 0 )
      {
        FT_ERROR(( "Open_PrivateDict:" ));
        FT_ERROR(( " invalid private dictionary section\n" ));
        error = T1_Err_Invalid_File_Format;
        goto Fail;
      }

      if ( ALLOC( private_dict, private_dict_size ) )
        goto Fail;

      /* read all sections into the buffer */
      if ( FILE_Seek( start_pos ) )
        goto Fail_Private;

      private_dict_size = 0;
      for (;;)
      {
        error = Read_PFB_Tag( stream, &tag, &size );
        if ( error || tag != 0x8002 )
        {
          error = 0;
          break;
        }

        if ( FILE_Read( private_dict + private_dict_size, size ) )
          goto Fail_Private;

        private_dict_size += size;
      }

      /* we must free the field `tokzer->base' if we are in a */
      /* disk-based PFB file                                  */
      if ( stream->read )
        FREE( tokzer->base );

      tokzer->base   = private_dict;
      tokzer->cursor = 0;
      tokzer->limit  = private_dict_size;
      tokzer->max    = private_dict_size;
    }
    else
    {
      char*  base;


      /* we are in a PFA file; read each token until we find `eexec' */
      while ( tokzer->token.kind2 != key_eexec )
      {
        error = Read_Token( tokzer );
        if ( error )
          goto Fail;
      }

      /* now determine whether the private dictionary is encoded in */
      /* binary or in hexadecimal ASCII format                      */

      /* we need to access the next 4 bytes (after the final \r     */
      /* following the `eexec' keyword); if they all are            */
      /* hexadecimal digits, then we have a case of ASCII storage   */
      while ( tokzer->cursor + 5 > tokzer->limit )
      {
        error = grow( tokzer );
        if ( error )
          goto Fail;
      }

      /* skip the whitespace/line feed after `eexec' */
      base = (char*)tokzer->base + tokzer->cursor + 1;
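
      /* hexa_value() returns -1 (all bits set) for a non-hex digit, */
      /* so OR-ing the four results is negative if and only if at    */
      /* least one of the four bytes is not a hexadecimal digit      */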
      if ( ( hexa_value( base[0] ) | hexa_value( base[1] ) |
             hexa_value( base[2] ) | hexa_value( base[3] ) ) < 0 )
      {
        /* binary encoding -- `simply' read the stream */

        /* if it is a memory-based resource, we need to allocate a new */
        /* storage buffer for the private dictionary, as it must be    */
        /* decrypted later                                             */
        if ( stream->base )
        {
          size = stream->size - tokzer->cursor - 1;  /* remaining bytes */

          if ( ALLOC( private_dict, size ) )  /* alloc private dict buffer */
            goto Fail;

          /* copy the eexec-encrypted bytes */
          MEM_Copy( private_dict, tokzer->base + tokzer->cursor + 1, size );

          /* reset the pointers -- forget about the file mapping */
          tokzer->base   = private_dict;
          tokzer->limit  = size;
          tokzer->max    = size;
          tokzer->cursor = 0;
        }
        /* in contrast, for disk-based resources, we simply grow the   */
        /* current buffer until the whole file has been read, and      */
        /* decrypt the bytes within it.  In both cases, the `base'     */
        /* buffer will be discarded by Done_Tokenizer() if we are in   */
        /* the private dictionary.                                     */
        else
        {
          /* grow the read buffer to cover the full file */
          while ( tokzer->limit < tokzer->max )
          {
            error = grow( tokzer );
            if ( error )
              goto Fail;
          }

          /* set up the cursor to point at the first encrypted byte */
          tokzer->cursor++;
        }
      }
      else
      {
        /* ASCII hexadecimal encoding.  This sucks... */
        FT_Byte*  write;
        FT_Byte*  cur;
        FT_Byte*  limit;
        FT_Int    count;


        /* allocate a buffer -- each pair of hexadecimal digits will */
        /* decode to a single byte                                   */
        count = stream->size - tokzer->cursor;
        size  = count / 2;

        if ( ALLOC( private_dict, size ) )  /* alloc private dict buffer */
          goto Fail;

        write = private_dict;
        cur   = tokzer->base + tokzer->cursor;
        limit = tokzer->base + tokzer->limit;

        /* decode the hexadecimal digits */
        while ( count > 0 )
        {
          /* ensure that we can read the next 2 bytes! */
          while ( cur + 2 > limit )
          {
            int  cursor = cur - tokzer->base;


            error = grow( tokzer );
            if ( error )
              goto Fail_Private;
            cur   = tokzer->base + cursor;
            limit = tokzer->base + tokzer->limit;
          }

          /* check for a new line */
          if ( cur[0] == '\r' || cur[0] == '\n' )
          {
            cur++;
            count--;
          }
          else
          {
            int  hex1 = hexa_value( cur[0] );


            /* exit if we have a non-hexadecimal digit which isn't */
            /* a new-line character                                */
            if ( hex1 < 0 )
              break;

            /* otherwise, store the decoded byte */
            *write++ = ( hex1 << 4 ) | hexa_value( cur[1] );
            cur   += 2;
            count -= 2;
          }
        }

        /* get rid of the old buffer in the case of disk-based resources */
        if ( !stream->base )
          FREE( tokzer->base );

        /* set up the pointers */
        tokzer->base   = private_dict;
        tokzer->limit  = size;
        tokzer->max    = size;
        tokzer->cursor = 0;
      }
    }

    /* finally, decrypt the private dictionary (eexec seed 55665) and */
    /* skip the `lenIV' bytes -- 4 by default                         */
    t1_decrypt( tokzer->base, tokzer->limit, 55665 );
    tokzer->cursor += 4;
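
    /* mark the tokenizer as being in the private dictionary; the flag */
    /* is tested above and in Done_Tokenizer() but is set nowhere else */
    /* in this file, so we assume it was meant to be set here          */
    tokzer->in_private = 1;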

  Fail:
    return error;

  Fail_Private:
    FREE( private_dict );
    goto Fail;
  }


  /*************************************************************************/
  /*                                                                       */
  /* <Function>                                                            */
  /*    Read_Token                                                         */
  /*                                                                       */
  /* <Description>                                                         */
  /*    Reads a new token from the current input stream.  This function    */
  /*    extracts tokens from the font program until Open_PrivateDict()     */
  /*    has been called.  After this, it returns tokens from the           */
  /*    (eexec-encrypted) private dictionary.                              */
  /*                                                                       */
  /* <Input>                                                               */
  /*    tokenizer :: The target tokenizer object.                          */
  /*                                                                       */
  /* <Return>                                                              */
  /*    FreeType error code.  0 means success.                             */
  /*                                                                       */
  /* <Note>                                                                */
  /*    Use the function Read_CharStrings() to read the binary             */
  /*    charstrings from the private dict.                                 */
  /*                                                                       */
  LOCAL_FUNC
  FT_Error  Read_Token( T1_Tokenizer  tokenizer )
  {
    T1_Tokenizer  tok = tokenizer;
    FT_Long       cur, limit;
    FT_Byte*      base;
    char          c, starter, ender;
    FT_Bool       token_started;

    T1_TokenType  kind;


    tok->error      = T1_Err_Ok;
    tok->token.kind = tok_any;

    base  = tok->base;
    limit = tok->limit;
    cur   = tok->cursor;

    token_started = 0;

    for (;;)
    {
      if ( cur >= limit )
      {
        if ( grow( tok ) )
          goto Exit;
        base  = tok->base;
        limit = tok->limit;
      }

      c = (char)base[cur++];

      /* check that we have an ASCII character */
      if ( (FT_Byte)c > 127 )
      {
        FT_ERROR(( "Read_Token:" ));
        FT_ERROR(( " unexpected binary data in Type1 fragment!\n" ));
        tok->error = T1_Err_Invalid_File_Format;
        goto Exit;
      }

      switch ( c )
      {
      case '\r':
      case '\n':
      case ' ' :
      case '\t':          /* skip initial whitespace => skip to next */
        if ( token_started )
        {
          /* possibly a name, a keyword, whatever */
          tok->token.kind = tok_any;
          tok->token.len  = cur - tok->token.start - 1;
          goto Exit;
        }
        /* otherwise, skip everything */
        break;

      case '%':           /* this is a comment -- skip everything */
        for (;;)
        {
          FT_Int  left = limit - cur;


          while ( left > 0 )
          {
            c = (char)base[cur++];
            if ( c == '\r' || c == '\n' )
              goto Next;
            left--;
          }

          if ( grow( tok ) )
            goto Exit;
          base  = tok->base;
          limit = tok->limit;
        }

      case '(':           /* a PostScript string */
        kind  = tok_string;
        ender = ')';

      L1:
        if ( !token_started )
        {
          token_started    = 1;
          tok->token.start = cur - 1;
        }

        {
          FT_Int  nest_level = 1;


          starter = c;
          for (;;)
          {
            FT_Int  left = limit - cur;


            while ( left > 0 )
            {
              c = (char)base[cur++];

              if ( c == starter )
                nest_level++;

              else if ( c == ender )
              {
                nest_level--;
                if ( nest_level <= 0 )
                {
                  tok->token.kind = kind;
                  tok->token.len  = cur - tok->token.start;
                  goto Exit;
                }
              }
              left--;
            }

            if ( grow( tok ) )
              goto Exit;
            base  = tok->base;
            limit = tok->limit;
          }
        }

      case '[':           /* a PostScript array */
        if ( token_started )
          goto Any_Token;

        kind  = tok_array;
        ender = ']';
        goto L1;

      case '{':           /* a PostScript program */
        if ( token_started )
          goto Any_Token;

        kind  = tok_program;
        ender = '}';
        goto L1;

      case '<':           /* a PostScript hex byte array? */
        if ( token_started )
          goto Any_Token;

        kind  = tok_hexarray;
        ender = '>';
        goto L1;

      case '0':           /* any number */
      case '1':
      case '2':
      case '3':
      case '4':
      case '5':
      case '6':
      case '7':
      case '8':
      case '9':
        if ( token_started )
          goto Next;

        tok->token.kind  = tok_number;
        token_started    = 1;
        tok->token.start = cur - 1;

      L2:
        for (;;)
        {
          FT_Int  left = limit - cur;


          while ( left > 0 )
          {
            c = (char)base[cur++];

            switch ( c )
            {
            case '[':                    /* ] */
            case '{':                    /* } */
            case '(':                    /* ) */
            case '<':
            case '/':
              goto Any_Token;

            case ' ':
            case '\r':
            case '\t':
            case '\n':
              tok->token.len = cur - tok->token.start - 1;
              goto Exit;

            default:
              ;
            }
            left--;
          }

          if ( grow( tok ) )
            goto Exit;
          base  = tok->base;
          limit = tok->limit;
        }

      case '.':           /* maybe a number */
      case '-':
      case '+':
        if ( token_started )
          goto Next;

        token_started    = 1;
        tok->token.start = cur - 1;

        for (;;)
        {
          FT_Int  left = limit - cur;


          if ( left > 0 )
          {
            /* test for any following digit, interpreted as a number */
            c = (char)base[cur];
            tok->token.kind = ( c >= '0' && c <= '9' ) ? tok_number
                                                       : tok_any;
            goto L2;
          }

          if ( grow( tok ) )
            goto Exit;
          base  = tok->base;
          limit = tok->limit;
        }

      case '/':           /* maybe an immediate name */
        if ( !token_started )
        {
          token_started    = 1;
          tok->token.start = cur - 1;

          for (;;)
          {
            FT_Int  left = limit - cur;


            if ( left > 0 )
            {
              /* test for a single `/', interpreted as garbage */
              c = (char)base[cur];
              tok->token.kind = ( c == ' '  || c == '\t' ||
                                  c == '\r' || c == '\n' ) ? tok_any
                                                           : tok_immediate;
              goto L2;
            }

            if ( grow( tok ) )
              goto Exit;
            base  = tok->base;
            limit = tok->limit;
          }
        }
        else
        {
        Any_Token:        /* possibly a name or whatever */
          cur--;
          tok->token.len = cur - tok->token.start;
          goto Exit;
        }

      default:
        if ( !token_started )
        {
          token_started    = 1;
          tok->token.start = cur - 1;
        }
      }

    Next:
      ;
    }

  Exit:
    tok->cursor = cur;

    if ( !tok->error )
    {
      /* now try to match keywords and immediate names */
      FT_Int  index;


      switch ( tok->token.kind )
      {
      case tok_immediate:   /* immediate name */
        index = Find_Name( (char*)( tok->base + tok->token.start + 1 ),
                           tok->token.len - 1,
                           t1_immediates,
                           imm_max - imm_first_ );
        tok->token.kind2 = ( index >= 0 )
                             ? (T1_TokenType)( imm_first_ + index )
                             : tok_error;
        break;

      case tok_any:         /* test for keyword */
        index = Find_Name( (char*)( tok->base + tok->token.start ),
                           tok->token.len,
                           t1_keywords,
                           key_max - key_first_ );
        if ( index >= 0 )
        {
          tok->token.kind  = tok_keyword;
          tok->token.kind2 = (T1_TokenType)( key_first_ + index );
        }
        else
          tok->token.kind2 = tok_error;
        break;

      default:
        tok->token.kind2 = tok_error;
      }
    }
    return tokenizer->error;
  }


#if 0

  /*************************************************************************/
  /*                                                                       */
  /* <Function>                                                            */
  /*    Read_CharStrings                                                   */
  /*                                                                       */
  /* <Description>                                                         */
  /*    Reads a charstrings element from the current input stream.  These  */
  /*    are binary bytes that encode each individual glyph outline.        */
  /*                                                                       */
  /*    The caller is responsible for skipping the `lenIV' bytes at the    */
  /*    start of the record.                                               */
  /*                                                                       */
  /* <Input>                                                               */
  /*    tokenizer :: The target tokenizer object.                          */
  /*                                                                       */
  /*    num_chars :: The number of binary bytes to read.                   */
  /*                                                                       */
  /* <Output>                                                              */
  /*    buffer    :: The target array of bytes.  These are                 */
  /*                 eexec-decrypted.                                      */
  /*                                                                       */
  /* <Return>                                                              */
  /*    FreeType error code.  0 means success.                             */
  /*                                                                       */
  /* <Note>                                                                */
  /*    The `buffer' array must be large enough to hold `num_chars'        */
  /*    bytes.                                                             */
  /*                                                                       */
  LOCAL_FUNC
  FT_Error  Read_CharStrings( T1_Tokenizer  tokenizer,
                              FT_Int        num_chars,
                              FT_Byte*      buffer )
  {
    for (;;)
    {
      FT_Int  left = tokenizer->limit - tokenizer->cursor;


      if ( left >= num_chars )
      {
        MEM_Copy( buffer, tokenizer->base + tokenizer->cursor, num_chars );
        t1_decrypt( buffer, num_chars, 4330 );
        tokenizer->cursor += num_chars;
        return T1_Err_Ok;
      }

      if ( grow( tokenizer ) )
        return tokenizer->error;
    }
  }

#endif /* 0 */


/* END */