yapps2.py
字號:
for x in testset: if x in tokens_seen: testset.remove(x) removed.append(x) if x in tokens_unseen: tokens_unseen.remove(x) tokens_seen = tokens_seen + testset if removed: if not testset: print 'Error in rule', self.rule+':', c, 'never matches.' else: print 'Warning:', self print ' * These tokens are being ignored:', join(removed, ', ') print ' due to previous choices using them.' if testset: if not tokens_unseen: # context sensitive scanners only! if test=='if': # if it's the first AND last test, then # we can simply put the code without an if/else c.output(gen, indent) else: gen.write(indent, "else: ") t = gen.in_test('', [], testset) if len(t) < 70-len(indent): gen.write("#", t) gen.write("\n") c.output(gen, indent+INDENT) else: gen.write(indent, test, " ", gen.in_test('_token_', tokens_unseen, testset), ":\n") c.output(gen, indent+INDENT) test = "elif" if gen['context-insensitive-scanner'] and tokens_unseen: gen.write(indent, "else:\n") gen.write(indent, INDENT, "raise SyntaxError(self._pos, ") gen.write("'Could not match ", self.rule, "')\n")class Wrapper(Node): def __init__(self, child): Node.__init__(self) self.child = child def setup(self, gen, rule): Node.setup(self, gen, rule) self.child.setup(gen, rule) def get_children(self): return [self.child] def update(self, gen): Node.update(self, gen) self.child.update(gen) gen.add_to(self.first, self.child.first) gen.equate(self.follow, self.child.follow)class Option(Wrapper): def setup(self, gen, rule): Wrapper.setup(self, gen, rule) if not self.accepts_epsilon: self.accepts_epsilon = 1 gen.changed() def __str__(self): return '[ %s ]' % str(self.child) def output(self, gen, indent): if self.child.accepts_epsilon: print 'Warning in rule', self.rule+': contents may be empty.' 
gen.write(indent, "if %s:\n" % gen.peek_test(self.first, self.child.first)) self.child.output(gen, indent+INDENT) class Plus(Wrapper): def setup(self, gen, rule): Wrapper.setup(self, gen, rule) if self.accepts_epsilon != self.child.accepts_epsilon: self.accepts_epsilon = self.child.accepts_epsilon gen.changed() def __str__(self): return '%s+' % str(self.child) def update(self, gen): Wrapper.update(self, gen) gen.add_to(self.follow, self.first) def output(self, gen, indent): if self.child.accepts_epsilon: print 'Warning in rule', self.rule+':' print ' * The repeated pattern could be empty. The resulting' print ' parser may not work properly.' gen.write(indent, "while 1:\n") self.child.output(gen, indent+INDENT) union = self.first[:] gen.add_to(union, self.follow) gen.write(indent+INDENT, "if %s: break\n" % gen.not_peek_test(union, self.child.first))class Star(Plus): def setup(self, gen, rule): Wrapper.setup(self, gen, rule) if not self.accepts_epsilon: self.accepts_epsilon = 1 gen.changed() def __str__(self): return '%s*' % str(self.child) def output(self, gen, indent): if self.child.accepts_epsilon: print 'Warning in rule', self.rule+':' print ' * The repeated pattern could be empty. The resulting' print ' parser probably will not work properly.' 
gen.write(indent, "while %s:\n" % gen.peek_test(self.follow, self.child.first)) self.child.output(gen, indent+INDENT)####################################################################### The remainder of this file is from parsedesc.{g,py}def append(lst, x): "Imperative append" lst.append(x) return lstdef add_inline_token(tokens, str): tokens.insert( 0, (str, eval(str, {}, {})) ) return Terminal(str)def cleanup_choice(lst): if len(lst) == 0: return Sequence([]) if len(lst) == 1: return lst[0] return apply(Choice, tuple(lst))def cleanup_sequence(lst): if len(lst) == 1: return lst[0] return apply(Sequence, tuple(lst))def cleanup_rep(node, rep): if rep == 'star': return Star(node) elif rep == 'plus': return Plus(node) else: return nodedef resolve_name(tokens, id, args): if id in map(lambda x: x[0], tokens): # It's a token if args: print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args) return Terminal(id) else: # It's a name, so assume it's a nonterminal return NonTerminal(id, args)from string import *import refrom yappsrt import *class ParserDescriptionScanner(Scanner): def __init__(self, str): Scanner.__init__(self,[ ('"rule"', 'rule'), ('"ignore"', 'ignore'), ('"token"', 'token'), ('"option"', 'option'), ('":"', ':'), ('"parser"', 'parser'), ('[ \011\015\012]+', '[ \011\015\012]+'), ('#.*?\015?\012', '#.*?\015?\012'), ('END', '$'), ('ATTR', '<<.+?>>'), ('STMT', '{{.+?}}'), ('ID', '[a-zA-Z_][a-zA-Z_0-9]*'), ('STR', '[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"'), ('LP', '\\('), ('RP', '\\)'), ('LB', '\\['), ('RB', '\\]'), ('OR', '[|]'), ('STAR', '[*]'), ('PLUS', '[+]'), ], ['[ \011\015\012]+', '#.*?\015?\012'], str)class ParserDescription(Parser): def Parser(self): self._scan('"parser"') ID = self._scan('ID') self._scan('":"') Options = self.Options() Tokens = self.Tokens() Rules = self.Rules(Tokens) END = self._scan('END') return Generator(ID,Options,Tokens,Rules) def Options(self): opt = {} while self._peek('"option"', '"token"', 
'"ignore"', 'END', '"rule"') == '"option"': self._scan('"option"') self._scan('":"') Str = self.Str() opt[Str] = 1 return opt def Tokens(self): tok = [] while self._peek('"token"', '"ignore"', 'END', '"rule"') in ['"token"', '"ignore"']: _token_ = self._peek('"token"', '"ignore"') if _token_ == '"token"': self._scan('"token"') ID = self._scan('ID') self._scan('":"') Str = self.Str() tok.append( (ID,Str) ) else: # == '"ignore"' self._scan('"ignore"') self._scan('":"') Str = self.Str() tok.append( ('#ignore',Str) ) return tok def Rules(self, tokens): rul = [] while self._peek('"rule"', 'END') == '"rule"': self._scan('"rule"') ID = self._scan('ID') OptParam = self.OptParam() self._scan('":"') ClauseA = self.ClauseA(tokens) rul.append( (ID,OptParam,ClauseA) ) return rul def ClauseA(self, tokens): ClauseB = self.ClauseB(tokens) v = [ClauseB] while self._peek('OR', 'RP', 'RB', '"rule"', 'END') == 'OR': OR = self._scan('OR') ClauseB = self.ClauseB(tokens) v.append(ClauseB) return cleanup_choice(v) def ClauseB(self, tokens): v = [] while self._peek('STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END') in ['STR', 'ID', 'LP', 'LB', 'STMT']: ClauseC = self.ClauseC(tokens) v.append(ClauseC) return cleanup_sequence(v) def ClauseC(self, tokens): ClauseD = self.ClauseD(tokens) _token_ = self._peek('PLUS', 'STAR', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END') if _token_ == 'PLUS': PLUS = self._scan('PLUS') return Plus(ClauseD) elif _token_ == 'STAR': STAR = self._scan('STAR') return Star(ClauseD) else: return ClauseD def ClauseD(self, tokens): _token_ = self._peek('STR', 'ID', 'LP', 'LB', 'STMT') if _token_ == 'STR': STR = self._scan('STR') t = (STR, eval(STR,{},{})) if t not in tokens: tokens.insert( 0, t ) return Terminal(STR) elif _token_ == 'ID': ID = self._scan('ID') OptParam = self.OptParam() return resolve_name(tokens, ID, OptParam) elif _token_ == 'LP': LP = self._scan('LP') ClauseA = self.ClauseA(tokens) RP = self._scan('RP') return 
ClauseA elif _token_ == 'LB': LB = self._scan('LB') ClauseA = self.ClauseA(tokens) RB = self._scan('RB') return Option(ClauseA) else: # == 'STMT' STMT = self._scan('STMT') return Eval(STMT[2:-2]) def OptParam(self): if self._peek('ATTR', '":"', 'PLUS', 'STAR', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END') == 'ATTR': ATTR = self._scan('ATTR') return ATTR[2:-2] return '' def Str(self): STR = self._scan('STR') return eval(STR,{},{})# This replaces the default main routineyapps_options = [ ('context-insensitive-scanner', 'context-insensitive-scanner', 'Scan all tokens (see docs)') ]def generate(inputfilename, outputfilename='', dump=0, **flags): """Generate a grammar, given an input filename (X.g) and an output filename (defaulting to X.py).""" if not outputfilename: if inputfilename[-2:]=='.g': outputfilename = inputfilename[:-2]+'.py' else: raise "Invalid Filename", outputfilename print 'Input Grammar:', inputfilename print 'Output File:', outputfilename DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers preparser, postparser = None, None # Code before and after the parser desc # Read the entire file s = open(inputfilename,'r').read() # See if there's a separation between the pre-parser and parser f = find(s, DIVIDER) if f >= 0: preparser, s = s[:f]+'\n\n', s[f+len(DIVIDER):] # See if there's a separation between the parser and post-parser f = find(s, DIVIDER) if f >= 0: s, postparser = s[:f], '\n\n'+s[f+len(DIVIDER):] # Create the parser and scanner p = ParserDescription(ParserDescriptionScanner(s)) if not p: return # Now parse the file t = wrap_error_reporter(p, 'Parser') if not t: return # Error if preparser is not None: t.preparser = preparser if postparser is not None: t.postparser = postparser # Check the options for f in t.options.keys(): for opt,_,_ in yapps_options: if f == opt: break else: print 'Warning: unrecognized option', f # Add command line options to the set for f in flags.keys(): t.options[f] = flags[f] # 
Generate the output if dump: t.dump_information() else: t.output = open(outputfilename, 'w') t.generate_output()if __name__=='__main__': import sys, getopt optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['dump']) if not args or len(args) > 2: print 'Usage:' print ' python', sys.argv[0], '[flags] input.g [output.py]' print 'Flags:' print (' --dump' + ' '*40)[:35] + 'Dump out grammar information' for flag, _, doc in yapps_options: print (' -f' + flag + ' '*40)[:35] + doc else: # Read in the options and create a list of flags flags = {} for opt in optlist: for flag, name, _ in yapps_options: if opt == ('-f', flag): flags[name] = 1 break else: if opt == ('--dump', ''): flags['dump'] = 1 else: print 'Warning - unrecognized option: ', opt[0], opt[1] apply(generate, tuple(args), flags)
?? 快捷鍵說明
復制代碼
Ctrl + C
搜索代碼
Ctrl + F
全屏模式
F11
切換主題
Ctrl + Shift + D
顯示快捷鍵
?
增大字號
Ctrl + =
減小字號
Ctrl + -