How does GCC do lexical analysis for C?
Version 4.3.0. I assumed it would use flex, and I did find a few .l files in the tree, but their contents didn't look like the C lexer. Searching online, I read that compilation goes through c_common_parse_file in c-opts.c. Following that lead, I eventually ended up at _cpp_lex_direct in libcpp/lex.c and discovered, to my surprise, that the lexing is done entirely by hand.
cpp_token *
_cpp_lex_direct (cpp_reader *pfile)
{
  cppchar_t c;
  cpp_buffer *buffer;
  const unsigned char *comment_start;
  cpp_token *result = pfile->cur_token++;

 fresh_line:
  result->flags = 0;
  buffer = pfile->buffer;
  if (buffer->need_line)
    {
      if (pfile->state.in_deferred_pragma)
        {
          result->type = CPP_PRAGMA_EOL;
          pfile->state.in_deferred_pragma = false;
          if (!pfile->state.pragma_allow_expansion)
            pfile->state.prevent_expansion--;
          return result;
        }
      if (!_cpp_get_fresh_line (pfile))
        {
          result->type = CPP_EOF;
          if (!pfile->state.in_directive)
            {
              /* Tell the compiler the line number of the EOF token.  */
              result->src_loc = pfile->line_table->highest_line;
              result->flags = BOL;
            }
          return result;
        }
      if (!pfile->keep_tokens)
        {
          pfile->cur_run = &pfile->base_run;
          result = pfile->base_run.base;
          pfile->cur_token = result + 1;
        }
      result->flags = BOL;
      if (pfile->state.parsing_args == 2)
        result->flags |= PREV_WHITE;
    }
  buffer = pfile->buffer;

 update_tokens_line:
  result->src_loc = pfile->line_table->highest_line;

 skipped_white:
  if (buffer->cur >= buffer->notes[buffer->cur_note].pos
      && !pfile->overlaid_buffer)
    {
      _cpp_process_line_notes (pfile, false);
      result->src_loc = pfile->line_table->highest_line;
    }
  c = *buffer->cur++;

  LINEMAP_POSITION_FOR_COLUMN (result->src_loc, pfile->line_table,
                               CPP_BUF_COLUMN (buffer, buffer->cur));

  switch (c)
    {
    case ' ': case '\t': case '\f': case '\v': case '\0':
      result->flags |= PREV_WHITE;
      skip_whitespace (pfile, c);
      goto skipped_white;

    case '\n':
      if (buffer->cur < buffer->rlimit)
        CPP_INCREMENT_LINE (pfile, 0);
      buffer->need_line = true;
      goto fresh_line;

    case '0': case '1': case '2': case '3': case '4':
    case '5': case '6': case '7': case '8': case '9':
      {
        struct normalize_state nst = INITIAL_NORMALIZE_STATE;
        result->type = CPP_NUMBER;
        lex_number (pfile, &result->val.str, &nst);
        warn_about_normalization (pfile, result, &nst);
        break;
      }

    case 'L':
      /* 'L' may introduce wide characters or strings.  */
      if (*buffer->cur == '\'' || *buffer->cur == '"')
        {
          lex_string (pfile, result, buffer->cur - 1);
          break;
        }
      /* Fall through.  */

    case '_':
    case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
    case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
    case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
    case 's': case 't': case 'u': case 'v': case 'w': case 'x':
    case 'y': case 'z':
    case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
    case 'G': case 'H': case 'I': case 'J': case 'K':
    case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
    case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
    case 'Y': case 'Z':
      result->type = CPP_NAME;
      {
        struct normalize_state nst = INITIAL_NORMALIZE_STATE;
        result->val.node = lex_identifier (pfile, buffer->cur - 1, false,
                                           &nst);
        warn_about_normalization (pfile, result, &nst);
      }

      /* Convert named operators to their proper types.  */
      if (result->val.node->flags & NODE_OPERATOR)
        {
          result->flags |= NAMED_OP;
          result->type = (enum cpp_ttype) result->val.node->directive_index;
        }
      break;

    case '\'':
    case '"':
      lex_string (pfile, result, buffer->cur - 1);
      break;

    case '/':
      /* A potential block or line comment.  */
      comment_start = buffer->cur;
      c = *buffer->cur;

      if (c == '*')
        {
          if (_cpp_skip_block_comment (pfile))
            cpp_error (pfile, CPP_DL_ERROR, "unterminated comment");
        }
      else if (c == '/' && (CPP_OPTION (pfile, cplusplus_comments)
                            || cpp_in_system_header (pfile)))
        {
          /* Warn about comments only if pedantically GNUC89, and not
             in system headers.  */
          if (CPP_OPTION (pfile, lang) == CLK_GNUC89 && CPP_PEDANTIC (pfile)
              && ! buffer->warned_cplusplus_comments)
            {
              cpp_error (pfile, CPP_DL_PEDWARN,
                         "C++ style comments are not allowed in ISO C90");
              cpp_error (pfile, CPP_DL_PEDWARN,
                         "(this will be reported only once per input file)");
              buffer->warned_cplusplus_comments = 1;
            }

          if (skip_line_comment (pfile) && CPP_OPTION (pfile, warn_comments))
            cpp_error (pfile, CPP_DL_WARNING, "multi-line comment");
        }
      else if (c == '=')
        {
          buffer->cur++;
          result->type = CPP_DIV_EQ;
          break;
        }
      else
        {
          result->type = CPP_DIV;
          break;
        }

      if (!pfile->state.save_comments)
        {
          result->flags |= PREV_WHITE;
          goto update_tokens_line;
        }

      /* Save the comment as a token in its own right.  */
      save_comment (pfile, result, comment_start, c);
      break;

    case '<':
      if (pfile->state.angled_headers)
        {
          lex_string (pfile, result, buffer->cur - 1);
          break;
        }

      result->type = CPP_LESS;
      if (*buffer->cur == '=')
        buffer->cur++, result->type = CPP_LESS_EQ;
      else if (*buffer->cur == '<')
        {
          buffer->cur++;
          IF_NEXT_IS ('=', CPP_LSHIFT_EQ, CPP_LSHIFT);
        }
      else if (CPP_OPTION (pfile, digraphs))
        {
          if (*buffer->cur == ':')
            {
              buffer->cur++;
              result->flags |= DIGRAPH;
              result->type = CPP_OPEN_SQUARE;
            }
          else if (*buffer->cur == '%')
            {
              buffer->cur++;
              result->flags |= DIGRAPH;
              result->type = CPP_OPEN_BRACE;
            }
        }
      break;

    case '>':
      result->type = CPP_GREATER;
      if (*buffer->cur == '=')
        buffer->cur++, result->type = CPP_GREATER_EQ;
      else if (*buffer->cur == '>')
        {
          buffer->cur++;
          IF_NEXT_IS ('=', CPP_RSHIFT_EQ, CPP_RSHIFT);
        }
      break;

    case '%':
      result->type = CPP_MOD;
      if (*buffer->cur == '=')
        buffer->cur++, result->type = CPP_MOD_EQ;
      else if (CPP_OPTION (pfile, digraphs))
        {
          if (*buffer->cur == ':')
            {
              buffer->cur++;
              result->flags |= DIGRAPH;
              result->type = CPP_HASH;
              if (*buffer->cur == '%' && buffer->cur[1] == ':')
                buffer->cur += 2, result->type = CPP_PASTE;
            }
          else if (*buffer->cur == '>')
            {
              buffer->cur++;
              result->flags |= DIGRAPH;
              result->type = CPP_CLOSE_BRACE;
            }
        }
      break;

    case '.':
      result->type = CPP_DOT;
      if (ISDIGIT (*buffer->cur))
        {
          struct normalize_state nst = INITIAL_NORMALIZE_STATE;
          result->type = CPP_NUMBER;
          lex_number (pfile, &result->val.str, &nst);
          warn_about_normalization (pfile, result, &nst);
        }
      else if (*buffer->cur == '.' && buffer->cur[1] == '.')
        buffer->cur += 2, result->type = CPP_ELLIPSIS;
      else if (*buffer->cur == '*' && CPP_OPTION (pfile, cplusplus))
        buffer->cur++, result->type = CPP_DOT_STAR;
      break;

    case '+':
      result->type = CPP_PLUS;
      if (*buffer->cur == '+')
        buffer->cur++, result->type = CPP_PLUS_PLUS;
      else if (*buffer->cur == '=')
        buffer->cur++, result->type = CPP_PLUS_EQ;
      break;

    case '-':
      result->type = CPP_MINUS;
      if (*buffer->cur == '>')
        {
          buffer->cur++;
          result->type = CPP_DEREF;
          if (*buffer->cur == '*' && CPP_OPTION (pfile, cplusplus))
            buffer->cur++, result->type = CPP_DEREF_STAR;
        }
      else if (*buffer->cur == '-')
        buffer->cur++, result->type = CPP_MINUS_MINUS;
      else if (*buffer->cur == '=')
        buffer->cur++, result->type = CPP_MINUS_EQ;
      break;

    case '&':
      result->type = CPP_AND;
      if (*buffer->cur == '&')
        buffer->cur++, result->type = CPP_AND_AND;
      else if (*buffer->cur == '=')
        buffer->cur++, result->type = CPP_AND_EQ;
      break;

    case '|':
      result->type = CPP_OR;
      if (*buffer->cur == '|')
        buffer->cur++, result->type = CPP_OR_OR;
      else if (*buffer->cur == '=')
        buffer->cur++, result->type = CPP_OR_EQ;
      break;

    case ':':
      result->type = CPP_COLON;
      if (*buffer->cur == ':' && CPP_OPTION (pfile, cplusplus))
        buffer->cur++, result->type = CPP_SCOPE;
      else if (*buffer->cur == '>' && CPP_OPTION (pfile, digraphs))
        {
          buffer->cur++;
          result->flags |= DIGRAPH;
          result->type = CPP_CLOSE_SQUARE;
        }
      break;

    case '*': IF_NEXT_IS ('=', CPP_MULT_EQ, CPP_MULT); break;
    case '=': IF_NEXT_IS ('=', CPP_EQ_EQ, CPP_EQ); break;
    case '!': IF_NEXT_IS ('=', CPP_NOT_EQ, CPP_NOT); break;
    case '^': IF_NEXT_IS ('=', CPP_XOR_EQ, CPP_XOR); break;
    case '#': IF_NEXT_IS ('#', CPP_PASTE, CPP_HASH); break;

    case '?': result->type = CPP_QUERY; break;
    case '~': result->type = CPP_COMPL; break;
    case ',': result->type = CPP_COMMA; break;
    case '(': result->type = CPP_OPEN_PAREN; break;
    case ')': result->type = CPP_CLOSE_PAREN; break;
    case '[': result->type = CPP_OPEN_SQUARE; break;
    case ']': result->type = CPP_CLOSE_SQUARE; break;
    case '{': result->type = CPP_OPEN_BRACE; break;
    case '}': result->type = CPP_CLOSE_BRACE; break;
    case ';': result->type = CPP_SEMICOLON; break;

      /* @ is a punctuator in Objective-C.  */
    case '@': result->type = CPP_ATSIGN; break;

    case '$':
    case '\\':
      {
        const uchar *base = --buffer->cur;
        struct normalize_state nst = INITIAL_NORMALIZE_STATE;

        if (forms_identifier_p (pfile, true, &nst))
          {
            result->type = CPP_NAME;
            result->val.node = lex_identifier (pfile, base, true, &nst);
            warn_about_normalization (pfile, result, &nst);
            break;
          }
        buffer->cur++;
      }

    default:
      create_literal (pfile, result, buffer->cur - 1, 1, CPP_OTHER);
      break;
    }

  return result;
}
Comments (9)
In GCC, both the C and C++ front ends originally used lex/yacc. Later, C++ was the first to switch to a hand-written recursive descent parser, and then C was converted to a hand-written one as well.
Lexing and parsing are relatively simple; the techniques are mature, so you can write them however you like.
I checked: in v4.3.0 the parser is indeed hand-written (c_parse_file in gcc/c-parser.c, about 8,000 lines, plus gcc/cp/parser.c at about 20,000 lines). Astonishing! Digging further back, in the oldest release I could find, v1.21 (ftp://gcc.gnu.org/pub/gcc/old-releases/gcc-1/), the lexer was already hand-written, while the parser was generated by bison.
That's right.
So then, how does one write a parser by hand?
For example:
S->aAcBe
A->b
A->Ab
B->d
For the simple, fully hand-coded route, most people write a recursive descent parser for an LL grammar. Roughly: first eliminate left recursion and left factors from the grammar (by hand or with a tool), then compute the FIRST and FOLLOW sets. For LR grammars there is also a hand-coding style called recursive ascent, but I have never used or even seen it; you can search for it.
An easy-to-follow example of recursive descent is a calculator (mine contains some nonsense from when I was first learning this stuff, which you'd best filter out, since my head was full of mush at the time); a minimal sketch is given right after this post.
PS: this stuff all follows set patterns. If you're just getting started, it's best to learn some simple graph algorithms first; at a minimum, the automaton-based parser algorithms in the Dragon Book (LL, SLR, LR, LALR) basically fall out once you frame them as graph search, the only difference being how the vertices and edges are constructed. (Personally I think purely hand-written recursive descent and table-driven parsing just use the resulting graph in different ways; thoroughly understand one and you understand them all.)
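None of the code below is from the thread; it is just a minimal sketch of the hand-written recursive descent style described above, for a toy calculator grammar of my own choosing (expr -> term {('+'|'-') term}, term -> factor {('*'|'/') factor}, factor -> NUMBER | '(' expr ')'). Each nonterminal becomes one C function and a single lookahead character picks the branch:

/* Minimal recursive descent calculator sketch (my own illustration).
   The grammar is already free of left recursion, so each nonterminal
   maps directly onto one function.  */
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

static const char *p;                 /* cursor into the input string */

static void skip_ws (void) { while (isspace ((unsigned char) *p)) p++; }

static double expr (void);

static double factor (void)           /* factor -> NUMBER | '(' expr ')' */
{
  skip_ws ();
  if (*p == '(')
    {
      p++;                            /* consume '(' */
      double v = expr ();
      skip_ws ();
      if (*p != ')')
        {
          fprintf (stderr, "expected ')'\n");
          exit (1);
        }
      p++;                            /* consume ')' */
      return v;
    }
  char *end;
  double v = strtod (p, &end);
  if (end == p)
    {
      fprintf (stderr, "expected a number at '%s'\n", p);
      exit (1);
    }
  p = end;
  return v;
}

static double term (void)             /* term -> factor {('*'|'/') factor} */
{
  double v = factor ();
  for (;;)
    {
      skip_ws ();
      if (*p == '*')      { p++; v *= factor (); }
      else if (*p == '/') { p++; v /= factor (); }
      else return v;
    }
}

static double expr (void)             /* expr -> term {('+'|'-') term} */
{
  double v = term ();
  for (;;)
    {
      skip_ws ();
      if (*p == '+')      { p++; v += term (); }
      else if (*p == '-') { p++; v -= term (); }
      else return v;
    }
}

int main (void)
{
  p = "1 + 2 * (3 - 4) / 5";
  printf ("%g\n", expr ());           /* prints 0.6 */
  return 0;
}

The loops inside expr and term play the role of the right-recursive rules you would get after eliminating left recursion from expr -> expr '+' term by hand.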
Bump, great thread!
First transform the grammar to eliminate the left recursion in A->Ab. The transformed grammar is
S->aAcBe
A->bA | epsilon
B->d
This is an LL(1) grammar. Algorithm 4.31 in Chapter 4 of the Dragon Book (2nd edition) is the construction of the LL(1) parsing table, and Algorithm 4.34 is the table-driven predictive parsing algorithm.
Rewriting A -> b | Ab as
A -> bA | epsilon
is problematic: it would generate the string 'acde', and that string
does not belong to the original grammar.
The rewrite rule for eliminating left recursion is:
A -> Aa | b
becomes
A -> bR
R -> aR | epsilon
So in the original grammar,
A -> b | Ab
can be rewritten as
A -> bR
R -> bR | epsilon
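To tie this back to the table-driven predictive parsing mentioned above (Dragon Book Algorithm 4.34), here is a rough sketch of my own, not from the thread, using this corrected grammar S -> aAcBe, A -> bR, R -> bR | epsilon, B -> d. The table entries follow from FIRST/FOLLOW: FIRST(R) = {b, epsilon} and FOLLOW(R) = {c}, so R expands to epsilon on lookahead 'c':

/* Table-driven LL(1) predictive parser sketch (my own illustration).
   Uppercase letters on the stack are nonterminals, lowercase letters
   are terminals, '$' marks the bottom of the stack / end of input.  */
#include <stdio.h>
#include <string.h>

static const char *table (char nonterm, char lookahead)
{
  if (nonterm == 'S' && lookahead == 'a') return "aAcBe";
  if (nonterm == 'A' && lookahead == 'b') return "bR";
  if (nonterm == 'R' && lookahead == 'b') return "bR";
  if (nonterm == 'R' && lookahead == 'c') return "";       /* R -> epsilon */
  if (nonterm == 'B' && lookahead == 'd') return "d";
  return NULL;                                             /* error entry */
}

static int parse (const char *input)
{
  char stack[64];
  int top = 0;
  stack[top++] = '$';
  stack[top++] = 'S';

  const char *ip = input;            /* current lookahead position */
  while (top > 0)
    {
      char X = stack[--top];         /* pop the top symbol */
      char a = *ip ? *ip : '$';
      if (X == '$' || (X >= 'a' && X <= 'z'))
        {                            /* terminal or end marker: must match */
          if (X != a)
            return 0;
          if (*ip)
            ip++;                    /* consume the matched terminal */
        }
      else
        {                            /* nonterminal: consult the table */
          const char *rhs = table (X, a);
          if (!rhs)
            return 0;
          printf ("%c -> %s\n", X, *rhs ? rhs : "epsilon");
          for (int i = (int) strlen (rhs) - 1; i >= 0; i--)
            stack[top++] = rhs[i];   /* push the right-hand side reversed */
        }
    }
  return *ip == '\0';
}

int main (void)
{
  printf ("abbcde: %s\n", parse ("abbcde") ? "accepted" : "rejected");
  printf ("acde:   %s\n", parse ("acde") ? "accepted" : "rejected");
  return 0;
}

Running it accepts "abbcde" and rejects "acde", which matches the point made above about the faulty transformation.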
Building an LL(1) parser by hand is straightforward. Take
S->aAcBe as an example:
S()
{
  if (lookahead() == 'a')
    match('a');
  else
    throw error;
  A();
  if (lookahead() == 'c')
    match('c');
  else
    throw error;
  B();
  if (lookahead() == 'e')
    match('e');
  else
    throw error;
}
Here lookahead returns the next token, and match compares it against the expected token and consumes it.
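To round the example out (this completion is mine, not part of the reply above), here is a self-contained C version of the same construction for the corrected grammar S -> aAcBe, A -> bR, R -> bR | epsilon, B -> d. lookahead() peeks at the next token (a single character here), match() compares and consumes it, and the remaining nonterminals A, R and B each get a function of their own; match() reports errors itself rather than using the 'throw error' pseudocode above:

/* Hand-written recursive descent parser sketch for
     S -> a A c B e      A -> b R      R -> b R | epsilon      B -> d
   One function per nonterminal; its body mirrors the productions.  */
#include <stdio.h>
#include <stdlib.h>

static const char *input;
static int pos;

static char lookahead (void)
{
  return input[pos];                 /* '\0' acts as the end marker */
}

static void error (const char *where)
{
  fprintf (stderr, "syntax error in %s at position %d\n", where, pos);
  exit (1);
}

static void match (char expected)
{
  if (lookahead () == expected)
    pos++;                           /* consume one token */
  else
    error ("match");
}

static void R (void)
{
  if (lookahead () == 'b')           /* R -> b R */
    {
      match ('b');
      R ();
    }
  /* else R -> epsilon: consume nothing; 'c' in FOLLOW(R) takes over */
}

static void A (void)                 /* A -> b R */
{
  match ('b');
  R ();
}

static void B (void)                 /* B -> d */
{
  match ('d');
}

static void S (void)                 /* S -> a A c B e */
{
  match ('a');
  A ();
  match ('c');
  B ();
  match ('e');
}

int main (void)
{
  input = "abbbcde";
  pos = 0;
  S ();
  if (lookahead () != '\0')
    error ("trailing input");
  printf ("%s parsed OK\n", input);
  return 0;
}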