BATOSAY Shell
Server IP : 170.10.162.208  /  Your IP : 216.73.216.181
Web Server : LiteSpeed
System : Linux altar19.supremepanel19.com 4.18.0-553.69.1.lve.el8.x86_64 #1 SMP Wed Aug 13 19:53:59 UTC 2025 x86_64
User : deltahospital ( 1806)
PHP Version : 7.4.33
Disable Function : NONE
MySQL : OFF  |  cURL : ON  |  WGET : ON  |  Perl : ON  |  Python : ON  |  Sudo : OFF  |  Pkexec : OFF
Directory :  /tmp/

Upload File :
current_dir [ Writeable ] document_root [ Writeable ]

 

Command :


[ HOME ]     

Current File : /tmp/phpdwJjpu
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

import os
import re

# Select the correct location for the development files based on a
# path variable (optionally read from a configuration file)
class PathChooser(object):
    def __init__(self, pathname):
        self.config = dict()
        if not os.path.exists(pathname):
            self.config_pathname = "(defaults)"
            self.config["SELINUX_DEVEL_PATH"] = "/usr/share/selinux/default:/usr/share/selinux/mls:/usr/share/selinux/devel"
            return
        self.config_pathname = pathname
        ignore = re.compile(r"^\s*(?:#.+)?$")
        consider = re.compile(r"^\s*(\w+)\s*=\s*(.+?)\s*$")
        with open(pathname, "r") as fd:
            for lineno, line in enumerate(fd):
                if ignore.match(line): continue
                mo = consider.match(line)
                if not mo:
                    raise ValueError("%s:%d: line is not in key = value format" % (pathname, lineno+1))
                self.config[mo.group(1)] = mo.group(2)

    # We're only exporting one useful function, so why not be a function
    def __call__(self, testfilename, pathset="SELINUX_DEVEL_PATH"):
        paths = self.config.get(pathset, None)
        if paths is None:
            raise ValueError("%s was not in %s" % (pathset, self.config_pathname))
        paths = paths.split(":")
        for p in paths:
            target = os.path.join(p, testfilename)
            if os.path.exists(target): return target
        return os.path.join(paths[0], testfilename)


"""
Various default settings, including file and directory locations.
"""

def data_dir():
    return "/var/lib/sepolgen"

def perm_map():
    return data_dir() + "/perm_map"

def interface_info():
    return data_dir() + "/interface_info"

def attribute_info():
    return data_dir() + "/attribute_info"

def refpolicy_makefile():
    chooser = PathChooser("/etc/selinux/sepolgen.conf")
    result = chooser("Makefile")
    if not os.path.exists(result):
        result = chooser("include/Makefile")
    return result

def headers():
    chooser = PathChooser("/etc/selinux/sepolgen.conf")
    return chooser("include")

# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2018
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
#   endorse or promote products derived from this software without
#  specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings.  The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system.  PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist).  However, most of the variables used during table
# construction are defined in terms of global variables.  Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing.  LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book).  LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expensive of readability and what might
# consider to be good Python "coding style."   Modify the code at your
# own risk!
# ----------------------------------------------------------------------------

import re
import types
import sys
import os.path
import inspect
import warnings

__version__    = '3.11'
__tabversion__ = '3.10'

#-----------------------------------------------------------------------------
#                     === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------

yaccdebug   = True             # Debugging mode.  If set, yacc generates a
                               # a 'parser.out' file in the current directory

debug_file  = 'parser.out'     # Default name of the debugging file
tab_module  = 'parsetab'       # Default name of the table module
default_lr  = 'LALR'           # Default LR table generation method

error_count = 3                # Number of symbols that must be shifted to leave recovery mode

yaccdevel   = False            # Set to True if developing yacc.  This turns off optimized
                               # implementations of certain functions.

resultlimit = 40               # Size limit of results when running in debug mode.

pickle_protocol = 0            # Protocol to use when writing pickle files

# String type-checking compatibility
if sys.version_info[0] < 3:
    string_types = basestring
else:
    string_types = str

MAXINT = sys.maxsize

# This object is a stand-in for a logging object created by the
# logging module.   PLY will use this by default to create things
# such as the parser.out file.  If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.

class PlyLogger(object):
    def __init__(self, f):
        self.f = f

    def debug(self, msg, *args, **kwargs):
        self.f.write((msg % args) + '\n')

    info = debug

    def warning(self, msg, *args, **kwargs):
        self.f.write('WARNING: ' + (msg % args) + '\n')

    def error(self, msg, *args, **kwargs):
        self.f.write('ERROR: ' + (msg % args) + '\n')

    critical = debug

# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    def __getattribute__(self, name):
        return self

    def __call__(self, *args, **kwargs):
        return self

# Exception raised for yacc-related errors
class YaccError(Exception):
    pass

# Format the result message that the parser produces when running in debug mode.
def format_result(r):
    repr_str = repr(r)
    if '\n' in repr_str:
        repr_str = repr(repr_str)
    if len(repr_str) > resultlimit:
        repr_str = repr_str[:resultlimit] + ' ...'
    result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
    return result

# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    repr_str = repr(r)
    if '\n' in repr_str:
        repr_str = repr(repr_str)
    if len(repr_str) < 16:
        return repr_str
    else:
        return '<%s @ 0x%x>' % (type(r).__name__, id(r))

# Panic mode error recovery support.   This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition

_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:

    def p_error(p):
        ...
        # Use parser.errok(), parser.token(), parser.restart()
        ...

    parser = yacc.yacc()
'''

def errok():
    warnings.warn(_warnmsg)
    return _errok()

def restart():
    warnings.warn(_warnmsg)
    return _restart()

def token():
    warnings.warn(_warnmsg)
    return _token()

# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
    global _errok, _token, _restart
    _errok = parser.errok
    _token = parser.token
    _restart = parser.restart
    r = errorfunc(token)
    try:
        del _errok, _token, _restart
    except NameError:
        pass
    return r

#-----------------------------------------------------------------------------
#                        ===  LR Parsing Engine ===
#
# The following classes are used for the LR parser itself.  These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------

# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
#        .type       = Grammar symbol type
#        .value      = Symbol value
#        .lineno     = Starting line number
#        .endlineno  = Ending line number (optional, set automatically)
#        .lexpos     = Starting lex position
#        .endlexpos  = Ending lex position (optional, set automatically)

class YaccSymbol:
    def __str__(self):
        return self.type

    def __repr__(self):
        return str(self)

# This class is a wrapper around the objects actually passed to each
# grammar rule.   Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined).   The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol.  The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.

class YaccProduction:
    def __init__(self, s, stack=None):
        self.slice = s
        self.stack = stack
        self.lexer = None
        self.parser = None

    def __getitem__(self, n):
        if isinstance(n, slice):
            return [s.value for s in self.slice[n]]
        elif n >= 0:
            return self.slice[n].value
        else:
            return self.stack[n].value

    def __setitem__(self, n, v):
        self.slice[n].value = v

    def __getslice__(self, i, j):
        return [s.value for s in self.slice[i:j]]

    def __len__(self):
        return len(self.slice)

    def lineno(self, n):
        return getattr(self.slice[n], 'lineno', 0)

    def set_lineno(self, n, lineno):
        self.slice[n].lineno = lineno

    def linespan(self, n):
        startline = getattr(self.slice[n], 'lineno', 0)
        endline = getattr(self.slice[n], 'endlineno', startline)
        return startline, endline

    def lexpos(self, n):
        return getattr(self.slice[n], 'lexpos', 0)

    def set_lexpos(self, n, lexpos):
        self.slice[n].lexpos = lexpos

    def lexspan(self, n):
        startpos = getattr(self.slice[n], 'lexpos', 0)
        endpos = getattr(self.slice[n], 'endlexpos', startpos)
        return startpos, endpos

    def error(self):
        raise SyntaxError

# -----------------------------------------------------------------------------
#                               == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------

class LRParser:
    def __init__(self, lrtab, errorf):
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf
        self.set_defaulted_states()
        self.errorok = True

    def errok(self):
        self.errorok = True

    def restart(self):
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)

    # Defaulted state support.
    # This method identifies parser states where there is only one possible reduction action.
    # For such states, the parser can make a choose to make a rule reduction without consuming
    # the next look-ahead token.  This delayed invocation of the tokenizer can be useful in
    # certain kinds of advanced parsing situations where the lexer and parser interact with
    # each other or change states (i.e., manipulation of scope, lexer states, etc.).
    #
    # See:  http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
    def set_defaulted_states(self):
        self.defaulted_states = {}
        for state, actions in self.action.items():
            rules = list(actions.values())
            if len(rules) == 1 and rules[0] < 0:
                self.defaulted_states[state] = rules[0]

    def disable_defaulted_states(self):
        self.defaulted_states = {}

    def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        if debug or yaccdevel:
            if isinstance(debug, int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
        elif tracking:
            return self.parseopt(input, lexer, debug, tracking, tokenfunc)
        else:
            return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)


    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parsedebug().
    #
    # This is the debugging enabled version of parse().  All changes made to the
    # parsing engine should be made here.   Optimized versions of this function
    # are automatically created by the ply/ygen.py script.  This script cuts out
    # sections enclosed in markers such as this:
    #
    #      #--! DEBUG
    #      statements
    #      #--! DEBUG
    #
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        #--! parsedebug-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto                      # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions               # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice  = YaccProduction(None)           # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery

        #--! DEBUG
        debug.info('PLY: PARSE DEBUG START')
        #--! DEBUG

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token

        # Set up the state and symbol stacks

        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack   = []                # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack         # Put in the production
        errtoken   = None               # Err token

        # The start state is assumed to be (0,$end)

        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            #--! DEBUG
            debug.debug('')
            debug.debug('State  : %s', state)
            #--! DEBUG

            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'

                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
                #--! DEBUG
                debug.debug('Defaulted state %s: Reduce using %d', state, -t)
                #--! DEBUG

            #--! DEBUG
            debug.debug('Stack  : %s',
                        ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            #--! DEBUG

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    #--! DEBUG
                    debug.debug('Action : Shift and goto state %s', t)
                    #--! DEBUG

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    #--! DEBUG
                    if plen:
                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
                                   '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
                                   goto[statestack[-1-plen]][pname])
                    else:
                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
                                   goto[statestack[-1]][pname])

                    #--! DEBUG

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            #--! DEBUG
                            debug.info('Result : %s', format_result(pslice[0]))
                            #--! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING

                        targ = [sym]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            #--! DEBUG
                            debug.info('Result : %s', format_result(pslice[0]))
                            #--! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    #--! DEBUG
                    debug.info('Done   : Returning %s', format_result(result))
                    debug.info('PLY: PARSE DEBUG END')
                    #--! DEBUG
                    return result

            if t is None:

                #--! DEBUG
                debug.error('Error  : %s',
                            ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                #--! DEBUG

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                            return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue

                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'

                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]

                continue

            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')

        #--! parsedebug-end

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt().
    #
    # Optimized version of parse() method.  DO NOT EDIT THIS CODE DIRECTLY!
    # This code is automatically generated by the ply/ygen.py script. Make
    # changes to the parsedebug() method instead.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        #--! parseopt-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto                      # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions               # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice  = YaccProduction(None)           # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery


        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token

        # Set up the state and symbol stacks

        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack   = []                # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack         # Put in the production
        errtoken   = None               # Err token

        # The start state is assumed to be (0,$end)

        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer


            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'

                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]


            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t


                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None


                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING

                        targ = [sym]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result

            if t is None:


                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                            return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue

                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'

                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]

                continue

            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')

        #--! parseopt-end

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt_notrack().
    #
    # Optimized version of parseopt() with line number tracking removed.
    # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
    # by the ply/ygen.py script. Make changes to the parsedebug() method instead.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        #--! parseopt-notrack-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto                      # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions               # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice  = YaccProduction(None)           # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery


        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token

        # Set up the state and symbol stacks

        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack   = []                # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack         # Put in the production
        errtoken   = None               # Err token

        # The start state is assumed to be (0,$end)

        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer


            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'

                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]


            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t


                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None


                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym


                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:


                        targ = [sym]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result

            if t is None:


                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                            return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue

                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'

                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    statestack.pop()
                    state = statestack[-1]

                continue

            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')

        #--! parseopt-notrack-end

# -----------------------------------------------------------------------------
#                          === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------

# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')

# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
#       expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
#       name     - Name of the production.  For example 'expr'
#       prod     - A list of symbols on the right side ['expr','PLUS','term']
#       prec     - Production precedence level
#       number   - Production number.
#       func     - Function that executes on reduce
#       file     - File where production function is defined
#       lineno   - Line number where production function is defined
#
# The following attributes are defined or optional.
#
#       len       - Length of the production (number of symbols on right hand side)
#       usyms     - Set of unique symbols found in the production
# -----------------------------------------------------------------------------

class Production(object):
    reduced = 0
    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name     = name
        self.prod     = tuple(prod)
        self.number   = number
        self.func     = func
        self.callable = None
        self.file     = file
        self.line     = line
        self.prec     = precedence

        # Internal settings used during table construction

        self.len  = len(self.prod)   # Length of the production

        # Create a list of unique production symbols used in the production
        self.usyms = []
        for s in self.prod:
            if s not in self.usyms:
                self.usyms.append(s)

        # List of all LR items for the production
        self.lr_items = []
        self.lr_next = None

        # Create a string representation
        if self.prod:
            self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
        else:
            self.str = '%s -> <empty>' % self.name

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'Production(' + str(self) + ')'

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    # Return the nth lr_item from the production (or None if at the end)
    def lr_item(self, n):
        if n > len(self.prod):
            return None
        p = LRItem(self, n)
        # Precompute the list of productions immediately following.
        try:
            p.lr_after = self.Prodnames[p.prod[n+1]]
        except (IndexError, KeyError):
            p.lr_after = []
        try:
            p.lr_before = p.prod[n-1]
        except IndexError:
            p.lr_before = None
        return p

    # Bind the production function name to a callable
    def bind(self, pdict):
        if self.func:
            self.callable = pdict[self.func]

# This class serves as a minimal standin for Production objects when
# reading table data from files.   It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    def __init__(self, str, name, len, func, file, line):
        self.name     = name
        self.len      = len
        self.func     = func
        self.callable = None
        self.file     = file
        self.line     = line
        self.str      = str

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'MiniProduction(%s)' % self.str

    # Bind the production function name to a callable
    def bind(self, pdict):
        if self.func:
            self.callable = pdict[self.func]


# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule.  For
# example:
#
#       expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse.  Here
# basic attributes:
#
#       name       - Name of the production.  For example 'expr'
#       prod       - A list of symbols on the right side ['expr','.', 'PLUS','term']
#       number     - Production number.
#
#       lr_next      Next LR item. Example, if we are ' expr -> expr . PLUS term'
#                    then lr_next refers to 'expr -> expr PLUS . term'
#       lr_index   - LR item index (location of the ".") in the prod list.
#       lookaheads - LALR lookahead symbols for this item
#       len        - Length of the production (number of symbols on right hand side)
#       lr_after    - List of all productions that immediately follow
#       lr_before   - Grammar symbol immediately before
# -----------------------------------------------------------------------------

class LRItem(object):
    def __init__(self, p, n):
        self.name       = p.name
        self.prod       = list(p.prod)
        self.number     = p.number
        self.lr_index   = n
        self.lookaheads = {}
        self.prod.insert(n, '.')
        self.prod       = tuple(self.prod)
        self.len        = len(self.prod)
        self.usyms      = p.usyms

    def __str__(self):
        if self.prod:
            s = '%s -> %s' % (self.name, ' '.join(self.prod))
        else:
            s = '%s -> <empty>' % self.name
        return s

    def __repr__(self):
        return 'LRItem(' + str(self) + ')'

# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols.  Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    i = len(symbols) - 1
    while i >= 0:
        if symbols[i] in terminals:
            return symbols[i]
        i -= 1
    return None

# -----------------------------------------------------------------------------
#                           === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------

class GrammarError(YaccError):
    pass

class Grammar(object):
    def __init__(self, terminals):
        self.Productions  = [None]  # A list of all of the productions.  The first
                                    # entry is always reserved for the purpose of
                                    # building an augmented grammar

        self.Prodnames    = {}      # A dictionary mapping the names of nonterminals to a list of all
                                    # productions of that nonterminal.

        self.Prodmap      = {}      # A dictionary that is only used to detect duplicate
                                    # productions.

        self.Terminals    = {}      # A dictionary mapping the names of terminal symbols to a
                                    # list of the rules where they are used.

        for term in terminals:
            self.Terminals[term] = []

        self.Terminals['error'] = []

        self.Nonterminals = {}      # A dictionary mapping names of nonterminals to a list
                                    # of rule numbers where they are used.

        self.First        = {}      # A dictionary of precomputed FIRST(x) symbols

        self.Follow       = {}      # A dictionary of precomputed FOLLOW(x) symbols

        self.Precedence   = {}      # Precedence rules for each terminal. Contains tuples of the
                                    # form ('right',level) or ('nonassoc', level) or ('left',level)

        self.UsedPrecedence = set() # Precedence rules that were actually used by the grammer.
                                    # This is only used to provide error checking and to generate
                                    # a warning about unused precedence rules.

        self.Start = None           # Starting symbol for the grammar


    def __len__(self):
        return len(self.Productions)

    def __getitem__(self, index):
        return self.Productions[index]

    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'.  level is a numeric level.
    #
    # -----------------------------------------------------------------------------

    def set_precedence(self, term, assoc, level):
        assert self.Productions == [None], 'Must call set_precedence() before add_production()'
        if term in self.Precedence:
            raise GrammarError('Precedence already specified for terminal %r' % term)
        if assoc not in ['left', 'right', 'nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc, level)

    # -----------------------------------------------------------------------------
    # add_production()
    #
    # Given an action function, this function assembles a production rule and
    # computes its precedence level.
    #
    # The production rule is supplied as a list of symbols.   For example,
    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
    # symbols ['expr','PLUS','term'].
    #
    # Precedence is determined by the precedence of the right-most non-terminal
    # or the precedence of a terminal specified by %prec.
    #
    # A variety of error checks are performed to make sure production symbols
    # are valid and that %prec is used correctly.
    # -----------------------------------------------------------------------------

    def add_production(self, prodname, syms, func=None, file='', line=0):

        if prodname in self.Terminals:
            raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
        if prodname == 'error':
            raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))

        # Look for literal tokens
        for n, s in enumerate(syms):
            if s[0] in "'\"":
                try:
                    c = eval(s)
                    if (len(c) > 1):
                        raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
                                           (file, line, s, prodname))
                    if c not in self.Terminals:
                        self.Terminals[c] = []
                    syms[n] = c
                    continue
                except SyntaxError:
                    pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))

        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
            if syms[-2] != '%prec':
                raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
                                   (file, line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname)
            if not prodprec:
                raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
            else:
                self.UsedPrecedence.add(precname)
            del syms[-2:]     # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms, self.Terminals)
            prodprec = self.Precedence.get(precname, ('right', 0))

        # See if the rule is already in the rulemap
        map = '%s -> %s' % (prodname, syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
                               'Previous definition at %s:%d' % (m.file, m.line))

        # From this point on, everything is valid.  Create a new Production instance
        pnumber  = len(self.Productions)
        if prodname not in self.Nonterminals:
            self.Nonterminals[prodname] = []

        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if t not in self.Nonterminals:
                    self.Nonterminals[t] = []
                self.Nonterminals[t].append(pnumber)

        # Create a production and add it to the list of productions
        p = Production(pnumber, prodname, syms, prodprec, func, file, line)
        self.Productions.append(p)
        self.Prodmap[map] = p

        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [p]

    # -----------------------------------------------------------------------------
    # set_start()
    #
    # Sets the starting symbol and creates the augmented grammar.  Production
    # rule 0 is S' -> start where start is the start symbol.
    # -----------------------------------------------------------------------------

    def set_start(self, start=None):
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError('start symbol %s undefined' % start)
        self.Productions[0] = Production(0, "S'", [start])
        self.Nonterminals[start].append(0)
        self.Start = start

    # -----------------------------------------------------------------------------
    # find_unreachable()
    #
    # Find all of the nonterminal symbols that can't be reached from the starting
    # symbol.  Returns a list of nonterminals that can't be reached.
    # -----------------------------------------------------------------------------

    def find_unreachable(self):

        # Mark all symbols that are reachable from a symbol s
        def mark_reachable_from(s):
            if s in reachable:
                return
            reachable.add(s)
            for p in self.Prodnames.get(s, []):
                for r in p.prod:
                    mark_reachable_from(r)

        reachable = set()
        mark_reachable_from(self.Productions[0].prod[0])
        return [s for s in self.Nonterminals if s not in reachable]

    # -----------------------------------------------------------------------------
    # infinite_cycles()
    #
    # This function looks at the various parsing rules and tries to detect
    # infinite recursion cycles (grammar rules where there is no possible way
    # to derive a string of only terminals).
    # -----------------------------------------------------------------------------

    def infinite_cycles(self):
        terminates = {}

        # Terminals:
        for t in self.Terminals:
            terminates[t] = True

        terminates['$end'] = True

        # Nonterminals:

        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = False

        # Then propagate termination until no change:
        while True:
            some_change = False
            for (n, pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = False
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = True

                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = True
                            some_change = True
                        # Don't need to consider any more productions for this n.
                        break

            if not some_change:
                break

        infinite = []
        for (s, term) in terminates.items():
            if not term:
                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)

        return infinite

    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used the grammar, but not defined as tokens or
    # grammar rules.  Returns a list of tuples (sym, prod) where sym in the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------
    def undefined_symbols(self):
        result = []
        for p in self.Productions:
            if not p:
                continue

            for s in p.prod:
                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
                    result.append((s, p))
        return result

    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar.  Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------
    def unused_terminals(self):
        unused_tok = []
        for s, v in self.Terminals.items():
            if s != 'error' and not v:
                unused_tok.append(s)

        return unused_tok

    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined,  but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------

    def unused_rules(self):
        unused_prod = []
        for s, v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod

    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term,precedence) corresponding to precedence
    # rules that were never used by the grammar.  term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------

    def unused_precedence(self):
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname, self.Precedence[termname][0]))

        return unused

    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first1, the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------
    def _first(self, beta):

        # We are computing First(x1,x2,x3,...,xn)
        result = []
        for x in beta:
            x_produces_empty = False

            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = True
                else:
                    if f not in result:
                        result.append(f)

            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('<empty>')

        return result

    # -------------------------------------------------------------------------
    # compute_first()
    #
    # Compute the value of FIRST1(X) for all symbols
    # -------------------------------------------------------------------------
    def compute_first(self):
        if self.First:
            return self.First

        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]

        self.First['$end'] = ['$end']

        # Nonterminals:

        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []

        # Then propagate symbols until no change:
        while True:
            some_change = False
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append(f)
                            some_change = True
            if not some_change:
                break

        return self.First

    # ---------------------------------------------------------------------
    # compute_follow()
    #
    # Computes all of the follow sets for every non-terminal symbol.  The
    # follow set is the set of all symbols that might follow a given
    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
    # ---------------------------------------------------------------------
    def compute_follow(self, start=None):
        # If already computed, return the result
        if self.Follow:
            return self.Follow

        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()

        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = []

        if not start:
            start = self.Productions[1].name

        self.Follow[start] = ['$end']

        while True:
            didadd = False
            for p in self.Productions[1:]:
                # Here is the production set
                for i, B in enumerate(p.prod):
                    if B in self.Nonterminals:
                        # Okay. We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = False
                        for f in fst:
                            if f != '<empty>' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = True
                            if f == '<empty>':
                                hasempty = True
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = True
            if not didadd:
                break
        return self.Follow


    # -----------------------------------------------------------------------------
    # build_lritems()
    #
    # This function walks the list of productions and builds a complete set of the
    # LR items.  The LR items are stored in two ways:  First, they are uniquely
    # numbered and placed in the list _lritems.  Second, a linked list of LR items
    # is built for each production.  For example:
    #
    #   E -> E PLUS E
    #
    # Creates the list
    #
    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
    # -----------------------------------------------------------------------------

    def build_lritems(self):
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while True:
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p, i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError, KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None

                lastlri.lr_next = lri
                if not lri:
                    break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items

# -----------------------------------------------------------------------------
#                            == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here.  They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------

class VersionError(YaccError):
    pass

class LRTable(object):
    def __init__(self):
        self.lr_action = None
        self.lr_goto = None
        self.lr_productions = None
        self.lr_method = None

    def read_table(self, module):
        if isinstance(module, types.ModuleType):
            parsetab = module
        else:
            exec('import %s' % module)
            parsetab = sys.modules[module]

        if parsetab._tabversion != __tabversion__:
            raise VersionError('yacc table file version is out of date')

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto

        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))

        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self, filename):
        try:
            import cPickle as pickle
        except ImportError:
            import pickle

        if not os.path.exists(filename):
          raise ImportError

        in_f = open(filename, 'rb')

        tabversion = pickle.load(in_f)
        if tabversion != __tabversion__:
            raise VersionError('yacc table file version is out of date')
        self.lr_method = pickle.load(in_f)
        signature      = pickle.load(in_f)
        self.lr_action = pickle.load(in_f)
        self.lr_goto   = pickle.load(in_f)
        productions    = pickle.load(in_f)

        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))

        in_f.close()
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self, pdict):
        for p in self.lr_productions:
            p.bind(pdict)


# -----------------------------------------------------------------------------
#                           === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
#     F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs:  X    - An input set
#          R    - A relation
#          FP   - Set-valued function
# ------------------------------------------------------------------------------

def digraph(X, R, FP):
    N = {}
    for x in X:
        N[x] = 0
    stack = []
    F = {}
    for x in X:
        if N[x] == 0:
            traverse(x, N, stack, F, X, R, FP)
    return F

def traverse(x, N, stack, F, X, R, FP):
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)             # F(X) <- F'(x)

    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y, N, stack, F, X, R, FP)
        N[x] = min(N[x], N[y])
        for a in F.get(y, []):
            if a not in F[x]:
                F[x].append(a)
    if N[x] == d:
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()

class LALRError(YaccError):
    pass

# -----------------------------------------------------------------------------
#                             == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm.  There are no
# public methods except for write()
# -----------------------------------------------------------------------------

class LRGeneratedTable(LRTable):
    def __init__(self, grammar, method='LALR', log=None):
        if method not in ['SLR', 'LALR']:
            raise LALRError('Unsupported method %s' % method)

        self.grammar = grammar
        self.lr_method = method

        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log

        # Internal attributes
        self.lr_action     = {}        # Action table
        self.lr_goto       = {}        # Goto table
        self.lr_productions  = grammar.Productions    # Copy of grammar Production array
        self.lr_goto_cache = {}        # Cache of computed gotos
        self.lr0_cidhash   = {}        # Cache of closures

        self._add_count    = 0         # Internal counter used to detect cycles

        # Diagonistic information filled in by the table generator
        self.sr_conflict   = 0
        self.rr_conflict   = 0
        self.conflicts     = []        # List of conflicts

        self.sr_conflicts  = []
        self.rr_conflicts  = []

        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()

    # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.

    def lr0_closure(self, I):
        self._add_count += 1

        # Add everything in I to J
        J = I[:]
        didadd = True
        while didadd:
            didadd = False
            for j in J:
                for x in j.lr_after:
                    if getattr(x, 'lr0_added', 0) == self._add_count:
                        continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = True

        return J

    # Compute the LR(0) goto function goto(I,X) where I is a set
    # of LR(0) items and X is a grammar symbol.   This function is written
    # in a way that guarantees uniqueness of the generated goto sets
    # (i.e. the same goto set will never be returned as two different Python
    # objects).  With uniqueness, we can later do fast set comparisons using
    # id(obj) instead of element-wise comparison.

    def lr0_goto(self, I, x):
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I), x))
        if g:
            return g

        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result

        s = self.lr_goto_cache.get(x)
        if not s:
            s = {}
            self.lr_goto_cache[x] = s

        gs = []
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                s1 = s.get(id(n))
                if not s1:
                    s1 = {}
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        g = s.get('$end')
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I), x)] = g
        return g

    # Compute the LR(0) sets of item function
    def lr0_items(self):
        C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
        i = 0
        for I in C:
            self.lr0_cidhash[id(I)] = i
            i += 1

        # Loop over the items in C and each grammar symbols
        i = 0
        while i < len(C):
            I = C[i]
            i += 1

            # Collect all of the symbols that could possibly be in the goto(I,X) sets
            asyms = {}
            for ii in I:
                for s in ii.usyms:
                    asyms[s] = None

            for x in asyms:
                g = self.lr0_goto(I, x)
                if not g or id(g) in self.lr0_cidhash:
                    continue
                self.lr0_cidhash[id(g)] = len(C)
                C.append(g)

        return C

    # -----------------------------------------------------------------------------
    #                       ==== LALR(1) Parsing ====
    #
    # LALR(1) parsing is almost exactly the same as SLR except that instead of
    # relying upon Follow() sets when performing reductions, a more selective
    # lookahead set that incorporates the state of the LR(0) machine is utilized.
    # Thus, we mainly just have to focus on calculating the lookahead sets.
    #
    # The method used here is due to DeRemer and Pennelo (1982).
    #
    # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
    #     Lookahead Sets", ACM Transactions on Programming Languages and Systems,
    #     Vol. 4, No. 4, Oct. 1982, pp. 615-649
    #
    # Further details can also be found in:
    #
    #  J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
    #      McGraw-Hill Book Company, (1985).
    #
    # -----------------------------------------------------------------------------

    # -----------------------------------------------------------------------------
    # compute_nullable_nonterminals()
    #
    # Creates a dictionary containing all of the non-terminals that might produce
    # an empty production.
    # -----------------------------------------------------------------------------

    def compute_nullable_nonterminals(self):
        nullable = set()
        num_nullable = 0
        while True:
            for p in self.grammar.Productions[1:]:
                if p.len == 0:
                    nullable.add(p.name)
                    continue
                for t in p.prod:
                    if t not in nullable:
                        break
                else:
                    nullable.add(p.name)
            if len(nullable) == num_nullable:
                break
            num_nullable = len(nullable)
        return nullable

    # -----------------------------------------------------------------------------
    # find_nonterminal_trans(C)
    #
    # Given a set of LR(0) items, this functions finds all of the non-terminal
    # transitions.    These are transitions in which a dot appears immediately before
    # a non-terminal.   Returns a list of tuples of the form (state,N) where state
    # is the state number and N is the nonterminal symbol.
    #
    # The input C is the set of LR(0) items.
    # -----------------------------------------------------------------------------

    def find_nonterminal_transitions(self, C):
        trans = []
        for stateno, state in enumerate(C):
            for p in state:
                if p.lr_index < p.len - 1:
                    t = (stateno, p.prod[p.lr_index+1])
                    if t[1] in self.grammar.Nonterminals:
                        if t not in trans:
                            trans.append(t)
        return trans

    # -----------------------------------------------------------------------------
    # dr_relation()
    #
    # Computes the DR(p,A) relationships for non-terminal transitions.  The input
    # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
    #
    # Returns a list of terminals.
    # -----------------------------------------------------------------------------

    def dr_relation(self, C, trans, nullable):
        state, N = trans
        terms = []

        g = self.lr0_goto(C[state], N)
        for p in g:
            if p.lr_index < p.len - 1:
                a = p.prod[p.lr_index+1]
                if a in self.grammar.Terminals:
                    if a not in terms:
                        terms.append(a)

        # This extra bit is to handle the start state
        if state == 0 and N == self.grammar.Productions[0].prod[0]:
            terms.append('$end')

        return terms

    # -----------------------------------------------------------------------------
    # reads_relation()
    #
    # Computes the READS() relation (p,A) READS (t,C).
    # -----------------------------------------------------------------------------

    def reads_relation(self, C, trans, empty):
        # Look for empty transitions
        rel = []
        state, N = trans

        g = self.lr0_goto(C[state], N)
        j = self.lr0_cidhash.get(id(g), -1)
        for p in g:
            if p.lr_index < p.len - 1:
                a = p.prod[p.lr_index + 1]
                if a in empty:
                    rel.append((j, a))

        return rel

    # -----------------------------------------------------------------------------
    # compute_lookback_includes()
    #
    # Determines the lookback and includes relations
    #
    # LOOKBACK:
    #
    # This relation is determined by running the LR(0) state machine forward.
    # For example, starting with a production "N : . A B C", we run it forward
    # to obtain "N : A B C ."   We then build a relationship between this final
    # state and the starting state.   These relationships are stored in a dictionary
    # lookdict.
    #
    # INCLUDES:
    #
    # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
    #
    # This relation is used to determine non-terminal transitions that occur
    # inside of other non-terminal transition states.   (p,A) INCLUDES (p', B)
    # if the following holds:
    #
    #       B -> LAT, where T -> epsilon and p' -L-> p
    #
    # L is essentially a prefix (which may be empty), T is a suffix that must be
    # able to derive an empty string.  State p' must lead to state p with the string L.
    #
    # -----------------------------------------------------------------------------

    def compute_lookback_includes(self, C, trans, nullable):
        lookdict = {}          # Dictionary of lookback relations
        includedict = {}       # Dictionary of include relations

        # Make a dictionary of non-terminal transitions
        dtrans = {}
        for t in trans:
            dtrans[t] = 1

        # Loop over all transitions and compute lookbacks and includes
        for state, N in trans:
            lookb = []
            includes = []
            for p in C[state]:
                if p.name != N:
                    continue

                # Okay, we have a name match.  We now follow the production all the way
                # through the state machine until we get the . on the right hand side

                lr_index = p.lr_index
                j = state
                while lr_index < p.len - 1:
                    lr_index = lr_index + 1
                    t = p.prod[lr_index]

                    # Check to see if this symbol and state are a non-terminal transition
                    if (j, t) in dtrans:
                        # Yes.  Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the
                        # production derives empty

                        li = lr_index + 1
                        while li < p.len:
                            if p.prod[li] in self.grammar.Terminals:
                                break      # No forget it
                            if p.prod[li] not in nullable:
                                break
                            li = li + 1
                        else:
                            # Appears to be a relation between (j,t) and (state,N)
                            includes.append((j, t))

                    g = self.lr0_goto(C[j], t)               # Go to next set
                    j = self.lr0_cidhash.get(id(g), -1)      # Go to next state

                # When we get here, j is the final state, now we have to locate the production
                for r in C[j]:
                    if r.name != p.name:
                        continue
                    if r.len != p.len:
                        continue
                    i = 0
                    # This look is comparing a production ". A B C" with "A B C ."
                    while i < r.lr_index:
                        if r.prod[i] != p.prod[i+1]:
                            break
                        i = i + 1
                    else:
                        lookb.append((j, r))
            for i in includes:
                if i not in includedict:
                    includedict[i] = []
                includedict[i].append((state, N))
            lookdict[(state, N)] = lookb

        return lookdict, includedict

    # -----------------------------------------------------------------------------
    # compute_read_sets()
    #
    # Given a set of LR(0) items, this function computes the read sets.
    #
    # Inputs:  C        =  Set of LR(0) items
    #          ntrans   = Set of nonterminal transitions
    #          nullable = Set of empty transitions
    #
    # Returns a set containing the read sets
    # -----------------------------------------------------------------------------

    def compute_read_sets(self, C, ntrans, nullable):
        FP = lambda x: self.dr_relation(C, x, nullable)
        R =  lambda x: self.reads_relation(C, x, nullable)
        F = digraph(ntrans, R, FP)
        return F

    # -----------------------------------------------------------------------------
    # compute_follow_sets()
    #
    # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
    # and an include set, this function computes the follow sets
    #
    # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
    #
    # Inputs:
    #            ntrans     = Set of nonterminal transitions
    #            readsets   = Readset (previously computed)
    #            inclsets   = Include sets (previously computed)
    #
    # Returns a set containing the follow sets
    # -----------------------------------------------------------------------------

    def compute_follow_sets(self, ntrans, readsets, inclsets):
        FP = lambda x: readsets[x]
        R  = lambda x: inclsets.get(x, [])
        F = digraph(ntrans, R, FP)
        return F

    # -----------------------------------------------------------------------------
    # add_lookaheads()
    #
    # Attaches the lookahead symbols to grammar rules.
    #
    # Inputs:    lookbacks         -  Set of lookback relations
    #            followset         -  Computed follow set
    #
    # This function directly attaches the lookaheads to productions contained
    # in the lookbacks set
    # -----------------------------------------------------------------------------

    def add_lookaheads(self, lookbacks, followset):
        for trans, lb in lookbacks.items():
            # Loop over productions in lookback
            for state, p in lb:
                if state not in p.lookaheads:
                    p.lookaheads[state] = []
                f = followset.get(trans, [])
                for a in f:
                    if a not in p.lookaheads[state]:
                        p.lookaheads[state].append(a)

    # -----------------------------------------------------------------------------
    # add_lalr_lookaheads()
    #
    # This function does all of the work of adding lookahead information for use
    # with LALR parsing
    # -----------------------------------------------------------------------------

    def add_lalr_lookaheads(self, C):
        # Determine all of the nullable nonterminals
        nullable = self.compute_nullable_nonterminals()

        # Find all non-terminal transitions
        trans = self.find_nonterminal_transitions(C)

        # Compute read sets
        readsets = self.compute_read_sets(C, trans, nullable)

        # Compute lookback/includes relations
        lookd, included = self.compute_lookback_includes(C, trans, nullable)

        # Compute LALR FOLLOW sets
        followsets = self.compute_follow_sets(trans, readsets, included)

        # Add all of the lookaheads
        self.add_lookaheads(lookd, followsets)

    # -----------------------------------------------------------------------------
    # lr_parse_table()
    #
    # This function constructs the parse tables for SLR or LALR
    # -----------------------------------------------------------------------------
    def lr_parse_table(self):
        Productions = self.grammar.Productions
        Precedence  = self.grammar.Precedence
        goto   = self.lr_goto         # Goto array
        action = self.lr_action       # Action array
        log    = self.log             # Logger for output

        actionp = {}                  # Action production array (temporary)

        log.info('Parsing method: %s', self.lr_method)

        # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
        # This determines the number of states

        C = self.lr0_items()

        if self.lr_method == 'LALR':
            self.add_lalr_lookaheads(C)

        # Build the parser table, state by state
        st = 0
        for I in C:
            # Loop over each production in I
            actlist = []              # List of actions
            st_action  = {}
            st_actionp = {}
            st_goto    = {}
            log.info('')
            log.info('state %d', st)
            log.info('')
            for p in I:
                log.info('    (%d) %s', p.number, p)
            log.info('')

            for p in I:
                if p.len == p.lr_index + 1:
                    if p.name == "S'":
                        # Start symbol. Accept!
                        st_action['$end'] = 0
                        st_actionp['$end'] = p
                    else:
                        # We are at the end of a production.  Reduce!
                        if self.lr_method == 'LALR':
                            laheads = p.lookaheads[st]
                        else:
                            laheads = self.grammar.Follow[p.name]
                        for a in laheads:
                            actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
                            r = st_action.get(a)
                            if r is not None:
                                # Whoa. Have a shift/reduce or reduce/reduce conflict
                                if r > 0:
                                    # Need to decide on shift or reduce here
                                    # By default we favor shifting. Need to add
                                    # some precedence rules here.

                                    # Shift precedence comes from the token
                                    sprec, slevel = Precedence.get(a, ('right', 0))

                                    # Reduce precedence comes from rule being reduced (p)
                                    rprec, rlevel = Productions[p.number].prec

                                    if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                        # We really need to reduce here.
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        if not slevel and not rlevel:
                                            log.info('  ! shift/reduce conflict for %s resolved as reduce', a)
                                            self.sr_conflicts.append((st, a, 'reduce'))
                                        Productions[p.number].reduced += 1
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the shift
                                        if not rlevel:
                                            log.info('  ! shift/reduce conflict for %s resolved as shift', a)
                                            self.sr_conflicts.append((st, a, 'shift'))
                                elif r < 0:
                                    # Reduce/reduce conflict.   In this case, we favor the rule
                                    # that was defined first in the grammar file
                                    oldp = Productions[-r]
                                    pp = Productions[p.number]
                                    if oldp.line > pp.line:
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        chosenp, rejectp = pp, oldp
                                        Productions[p.number].reduced += 1
                                        Productions[oldp.number].reduced -= 1
                                    else:
                                        chosenp, rejectp = oldp, pp
                                    self.rr_conflicts.append((st, chosenp, rejectp))
                                    log.info('  ! reduce/reduce conflict for %s resolved using rule %d (%s)',
                                             a, st_actionp[a].number, st_actionp[a])
                                else:
                                    raise LALRError('Unknown conflict in state %d' % st)
                            else:
                                st_action[a] = -p.number
                                st_actionp[a] = p
                                Productions[p.number].reduced += 1
                else:
                    i = p.lr_index
                    a = p.prod[i+1]       # Get symbol right after the "."
                    if a in self.grammar.Terminals:
                        g = self.lr0_goto(I, a)
                        j = self.lr0_cidhash.get(id(g), -1)
                        if j >= 0:
                            # We are in a shift state
                            actlist.append((a, p, 'shift and go to state %d' % j))
                            r = st_action.get(a)
                            if r is not None:
                                # Whoa have a shift/reduce or shift/shift conflict
                                if r > 0:
                                    if r != j:
                                        raise LALRError('Shift/shift conflict in state %d' % st)
                                elif r < 0:
                                    # Do a precedence check.
                                    #   -  if precedence of reduce rule is higher, we reduce.
                                    #   -  if precedence of reduce is same and left assoc, we reduce.
                                    #   -  otherwise we shift

                                    # Shift precedence comes from the token
                                    sprec, slevel = Precedence.get(a, ('right', 0))

                                    # Reduce precedence comes from the rule that could have been reduced
                                    rprec, rlevel = Productions[st_actionp[a].number].prec

                                    if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                        # We decide to shift here... highest precedence to shift
                                        Productions[st_actionp[a].number].reduced -= 1
                                        st_action[a] = j
                                        st_actionp[a] = p
                                        if not rlevel:
                                            log.info('  ! shift/reduce conflict for %s resolved as shift', a)
                                            self.sr_conflicts.append((st, a, 'shift'))
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the reduce
                                        if not slevel and not rlevel:
                                            log.info('  ! shift/reduce conflict for %s resolved as reduce', a)
                                            self.sr_conflicts.append((st, a, 'reduce'))

                                else:
                                    raise LALRError('Unknown conflict in state %d' % st)
                            else:
                                st_action[a] = j
                                st_actionp[a] = p

            # Print the actions associated with each terminal
            _actprint = {}
            for a, p, m in actlist:
                if a in st_action:
                    if p is st_actionp[a]:
                        log.info('    %-15s %s', a, m)
                        _actprint[(a, m)] = 1
            log.info('')
            # Print the actions that were not used. (debugging)
            not_used = 0
            for a, p, m in actlist:
                if a in st_action:
                    if p is not st_actionp[a]:
                        if not (a, m) in _actprint:
                            log.debug('  ! %-15s [ %s ]', a, m)
                            not_used = 1
                            _actprint[(a, m)] = 1
            if not_used:
                log.debug('')

            # Construct the goto table for this state

            nkeys = {}
            for ii in I:
                for s in ii.usyms:
                    if s in self.grammar.Nonterminals:
                        nkeys[s] = None
            for n in nkeys:
                g = self.lr0_goto(I, n)
                j = self.lr0_cidhash.get(id(g), -1)
                if j >= 0:
                    st_goto[n] = j
                    log.info('    %-30s shift and go to state %d', n, j)

            action[st] = st_action
            actionp[st] = st_actionp
            goto[st] = st_goto
            st += 1

    # -----------------------------------------------------------------------------
    # write()
    #
    # This function writes the LR parsing tables to a file
    # -----------------------------------------------------------------------------

    def write_table(self, tabmodule, outputdir='', signature=''):
        if isinstance(tabmodule, types.ModuleType):
            raise IOError("Won't overwrite existing tabmodule")

        basemodulename = tabmodule.split('.')[-1]
        filename = os.path.join(outputdir, basemodulename) + '.py'
        try:
            f = open(filename, 'w')

            f.write('''
# %s
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = %r

_lr_method = %r

_lr_signature = %r
    ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))

            # Change smaller to 0 to go back to original tables
            smaller = 1

            # Factor out names to try and make smaller
            if smaller:
                items = {}

                for s, nd in self.lr_action.items():
                    for name, v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([], [])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)

                f.write('\n_lr_action_items = {')
                for k, v in items.items():
                    f.write('%r:([' % k)
                    for i in v[0]:
                        f.write('%r,' % i)
                    f.write('],[')
                    for i in v[1]:
                        f.write('%r,' % i)

                    f.write(']),')
                f.write('}\n')

                f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action:  _lr_action[_x] = {}
      _lr_action[_x][_k] = _y
del _lr_action_items
''')

            else:
                f.write('\n_lr_action = { ')
                for k, v in self.lr_action.items():
                    f.write('(%r,%r):%r,' % (k[0], k[1], v))
                f.write('}\n')

            if smaller:
                # Factor out names to try and make smaller
                items = {}

                for s, nd in self.lr_goto.items():
                    for name, v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([], [])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)

                f.write('\n_lr_goto_items = {')
                for k, v in items.items():
                    f.write('%r:([' % k)
                    for i in v[0]:
                        f.write('%r,' % i)
                    f.write('],[')
                    for i in v[1]:
                        f.write('%r,' % i)

                    f.write(']),')
                f.write('}\n')

                f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
   for _x, _y in zip(_v[0], _v[1]):
       if not _x in _lr_goto: _lr_goto[_x] = {}
       _lr_goto[_x][_k] = _y
del _lr_goto_items
''')
            else:
                f.write('\n_lr_goto = { ')
                for k, v in self.lr_goto.items():
                    f.write('(%r,%r):%r,' % (k[0], k[1], v))
                f.write('}\n')

            # Write production table
            f.write('_lr_productions = [\n')
            for p in self.lr_productions:
                if p.func:
                    f.write('  (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
                                                          p.func, os.path.basename(p.file), p.line))
                else:
                    f.write('  (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
            f.write(']\n')
            f.close()

        except IOError as e:
            raise


    # -----------------------------------------------------------------------------
    # pickle_table()
    #
    # This function pickles the LR parsing tables to a supplied file object
    # -----------------------------------------------------------------------------

    def pickle_table(self, filename, signature=''):
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        with open(filename, 'wb') as outf:
            pickle.dump(__tabversion__, outf, pickle_protocol)
            pickle.dump(self.lr_method, outf, pickle_protocol)
            pickle.dump(signature, outf, pickle_protocol)
            pickle.dump(self.lr_action, outf, pickle_protocol)
            pickle.dump(self.lr_goto, outf, pickle_protocol)

            outp = []
            for p in self.lr_productions:
                if p.func:
                    outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
                else:
                    outp.append((str(p), p.name, p.len, None, None, None))
            pickle.dump(outp, outf, pickle_protocol)

# -----------------------------------------------------------------------------
#                            === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack.  This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------

def get_caller_module_dict(levels):
    f = sys._getframe(levels)
    ldict = f.f_globals.copy()
    if f.f_globals != f.f_locals:
        ldict.update(f.f_locals)
    return ldict

# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    grammar = []
    # Split the doc string into lines
    pstrings = doc.splitlines()
    lastp = None
    dline = line
    for ps in pstrings:
        dline += 1
        p = ps.split()
        if not p:
            continue
        try:
            if p[0] == '|':
                # This is a continuation of a previous rule
                if not lastp:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
                prodname = lastp
                syms = p[1:]
            else:
                prodname = p[0]
                lastp = prodname
                syms   = p[2:]
                assign = p[1]
                if assign != ':' and assign != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))

            grammar.append((file, dline, prodname, syms))
        except SyntaxError:
            raise
        except Exception:
            raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))

    return grammar

# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    def __init__(self, pdict, log=None):
        self.pdict      = pdict
        self.start      = None
        self.error_func = None
        self.tokens     = None
        self.modules    = set()
        self.grammar    = []
        self.error      = False

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_modules()
        return self.error

    # Compute a signature over the grammar
    def signature(self):
        parts = []
        try:
            if self.start:
                parts.append(self.start)
            if self.prec:
                parts.append(''.join([''.join(p) for p in self.prec]))
            if self.tokens:
                parts.append(' '.join(self.tokens))
            for f in self.pfuncs:
                if f[3]:
                    parts.append(f[3])
        except (TypeError, ValueError):
            pass
        return ''.join(parts)

    # -----------------------------------------------------------------------------
    # validate_modules()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file.  Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work).  Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------

    def validate_modules(self):
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')

        for module in self.modules:
            try:
                lines, linen = inspect.getsourcelines(module)
            except IOError:
                continue

            counthash = {}
            for linen, line in enumerate(lines):
                linen += 1
                m = fre.match(line)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        filename = inspect.getsourcefile(module)
                        self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
                                         filename, linen, name, prev)

    # Get the start symbol
    def get_start(self):
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start, string_types):
                self.log.error("'start' must be a string")

    # Look for error handler
    def get_error_func(self):
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        if self.error_func:
            if isinstance(self.error_func, types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = True
                return

            eline = self.error_func.__code__.co_firstlineno
            efile = self.error_func.__code__.co_filename
            module = inspect.getmodule(self.error_func)
            self.modules.add(module)

            argcount = self.error_func.__code__.co_argcount - ismethod
            if argcount != 1:
                self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
                self.error = True

    # Get the tokens map
    def get_tokens(self):
        tokens = self.pdict.get('tokens')
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = sorted(tokens)

    # Validate the tokens
    def validate_tokens(self):
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = True
            return

        terminals = set()
        for n in self.tokens:
            if n in terminals:
                self.log.warning('Token %r multiply defined', n)
            terminals.add(n)

    # Get the precedence map (if any)
    def get_precedence(self):
        self.prec = self.pdict.get('precedence')

    # Validate and parse the precedence map
    def validate_precedence(self):
        preclist = []
        if self.prec:
            if not isinstance(self.prec, (list, tuple)):
                self.log.error('precedence must be a list or tuple')
                self.error = True
                return
            for level, p in enumerate(self.prec):
                if not isinstance(p, (list, tuple)):
                    self.log.error('Bad precedence table')
                    self.error = True
                    return

                if len(p) < 2:
                    self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
                    self.error = True
                    return
                assoc = p[0]
                if not isinstance(assoc, string_types):
                    self.log.error('precedence associativity must be a string')
                    self.error = True
                    return
                for term in p[1:]:
                    if not isinstance(term, string_types):
                        self.log.error('precedence items must be strings')
                        self.error = True
                        return
                    preclist.append((term, assoc, level+1))
        self.preclist = preclist

    # Get all p_functions from the grammar
    def get_pfunctions(self):
        p_functions = []
        for name, item in self.pdict.items():
            if not name.startswith('p_') or name == 'p_error':
                continue
            if isinstance(item, (types.FunctionType, types.MethodType)):
                line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
                module = inspect.getmodule(item)
                p_functions.append((line, module, name, item.__doc__))

        # Sort all of the actions by line number; make sure to stringify
        # modules to make them sortable, since `line` may not uniquely sort all
        # p functions
        p_functions.sort(key=lambda p_function: (
            p_function[0],
            str(p_function[1]),
            p_function[2],
            p_function[3]))
        self.pfuncs = p_functions

    # Validate all of the p_functions
    def validate_pfunctions(self):
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error('no rules of the form p_rulename are defined')
            self.error = True
            return

        for line, module, name, doc in self.pfuncs:
            file = inspect.getsourcefile(module)
            func = self.pdict[name]
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func.__code__.co_argcount > reqargs:
                self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
                self.error = True
            elif func.__code__.co_argcount < reqargs:
                self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
                self.error = True
            elif not func.__doc__:
                self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
                                 file, line, func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc, file, line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError as e:
                    self.log.error(str(e))
                    self.error = True

                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.modules.add(module)

        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.

        for n, v in self.pdict.items():
            if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
                continue
            if n.startswith('t_'):
                continue
            if n.startswith('p_') and n != 'p_error':
                self.log.warning('%r not defined as a function', n)
            if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
                   (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
                if v.__doc__:
                    try:
                        doc = v.__doc__.split(' ')
                        if doc[1] == ':':
                            self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
                                             v.__code__.co_filename, v.__code__.co_firstlineno, n)
                    except IndexError:
                        pass

        self.grammar = grammar

# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------

def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
         outputdir=None, debuglog=None, errorlog=None, picklefile=None):

    if tabmodule is None:
        tabmodule = tab_module

    # Reference to the parsing method of the last built parser
    global parse

    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        pdict = dict(_items)
        # If no __file__ or __package__ attributes are available, try to obtain them
        # from the __module__ instead
        if '__file__' not in pdict:
            pdict['__file__'] = sys.modules[pdict['__module__']].__file__
        if '__package__' not in pdict and '__module__' in pdict:
            if hasattr(sys.modules[pdict['__module__']], '__package__'):
                pdict['__package__'] = sys.modules[pdict['__module__']].__package__
    else:
        pdict = get_caller_module_dict(2)

    if outputdir is None:
        # If no output directory is set, the location of the output files
        # is determined according to the following rules:
        #     - If tabmodule specifies a package, files go into that package directory
        #     - Otherwise, files go in the same directory as the specifying module
        if isinstance(tabmodule, types.ModuleType):
            srcfile = tabmodule.__file__
        else:
            if '.' not in tabmodule:
                srcfile = pdict['__file__']
            else:
                parts = tabmodule.split('.')
                pkgname = '.'.join(parts[:-1])
                exec('import %s' % pkgname)
                srcfile = getattr(sys.modules[pkgname], '__file__', '')
        outputdir = os.path.dirname(srcfile)

    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = pdict.get('__package__')
    if pkg and isinstance(tabmodule, str):
        if '.' not in tabmodule:
            tabmodule = pkg + '.' + tabmodule



    # Set start symbol if it's specified directly using an argument
    if start is not None:
        pdict['start'] = start

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict, log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError('Unable to build parser')

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr, pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception as e:
                errorlog.warning('There was a problem loading the table file: %r', e)
    except VersionError as e:
        errorlog.warning(str(e))
    except ImportError:
        pass

    if debuglog is None:
        if debug:
            try:
                debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
            except IOError as e:
                errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
                debuglog = NullLogger()
        else:
            debuglog = NullLogger()

    debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)

    errors = False

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError('Unable to build parser')

    if not pinfo.error_func:
        errorlog.warning('no p_error() function is defined')

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term, assoc, level)
        except GrammarError as e:
            errorlog.warning('%s', e)

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname, syms, funcname, file, line)
        except GrammarError as e:
            errorlog.error('%s', e)
            errors = True

    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError as e:
        errorlog.error(str(e))
        errors = True

    if errors:
        raise YaccError('Unable to build parser')

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
        errors = True

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info('')
        debuglog.info('Unused terminals:')
        debuglog.info('')
        for term in unused_terminals:
            errorlog.warning('Token %r defined, but not used', term)
            debuglog.info('    %s', term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info('')
        debuglog.info('Grammar')
        debuglog.info('')
        for n, p in enumerate(grammar.Productions):
            debuglog.info('Rule %-5d %s', n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)

    if len(unused_terminals) == 1:
        errorlog.warning('There is 1 unused token')
    if len(unused_terminals) > 1:
        errorlog.warning('There are %d unused tokens', len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning('There is 1 unused rule')
    if len(unused_rules) > 1:
        errorlog.warning('There are %d unused rules', len(unused_rules))

    if debug:
        debuglog.info('')
        debuglog.info('Terminals, with rules where they appear')
        debuglog.info('')
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info('')
        debuglog.info('Nonterminals, with rules where they appear')
        debuglog.info('')
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info('')

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning('Symbol %r is unreachable', u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error('Infinite recursion detected for symbol %r', inf)
            errors = True

    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
        errors = True

    if errors:
        raise YaccError('Unable to build parser')

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug('Generating %s tables', method)

    lr = LRGeneratedTable(grammar, method, debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning('1 shift/reduce conflict')
        elif num_sr > 1:
            errorlog.warning('%d shift/reduce conflicts', num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning('1 reduce/reduce conflict')
        elif num_rr > 1:
            errorlog.warning('%d reduce/reduce conflicts', num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning('')
        debuglog.warning('Conflicts:')
        debuglog.warning('')

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s',  tok, state, resolution)

        already_reported = set()
        for state, rule, rejected in lr.rr_conflicts:
            if (state, id(rule), id(rejected)) in already_reported:
                continue
            debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            debuglog.warning('rejected rule (%s) in state %d', rejected, state)
            errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            errorlog.warning('rejected rule (%s) in state %d', rejected, state)
            already_reported.add((state, id(rule), id(rejected)))

        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning('Rule (%s) is never reduced', rejected)
                errorlog.warning('Rule (%s) is never reduced', rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        try:
            lr.write_table(tabmodule, outputdir, signature)
            if tabmodule in sys.modules:
                del sys.modules[tabmodule]
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))

    # Write a pickled version of the tables
    if picklefile:
        try:
            lr.pickle_table(picklefile, signature)
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (picklefile, e))

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr, pinfo.error_func)

    parse = parser.parse
    return parser
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006 Red Hat 
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

"""
Classes representing basic access.

SELinux - at the most basic level - represents access as
the 4-tuple subject (type or context), target (type or context),
object class, permission. The policy language elaborates this basic
access to faciliate more concise rules (e.g., allow rules can have multiple
source or target types - see refpolicy for more information).

This module has objects for representing the most basic access (AccessVector)
and sets of that access (AccessVectorSet). These objects are used in Madison
in a variety of ways, but they are the fundamental representation of access.
"""

from . import refpolicy
from . import util

from selinux import audit2why

def is_idparam(id):
    """Determine if an id is a paramater in the form $N, where N is
    an integer.

    Returns:
      True if the id is a paramater
      False if the id is not a paramater
    """
    if len(id) > 1 and id[0] == '$':
        try:
            int(id[1:])
        except ValueError:
            return False
        return True
    else:
        return False

class AccessVector(util.Comparison):
    """
    An access vector is the basic unit of access in SELinux.

    Access vectors are the most basic representation of access within
    SELinux. It represents the access a source type has to a target
    type in terms of an object class and a set of permissions.

    Access vectors are distinct from AVRules in that they can only
    store a single source type, target type, and object class. The
    simplicity of AccessVectors makes them useful for storing access
    in a form that is easy to search and compare.

    The source, target, and object are stored as string. No checking
    done to verify that the strings are valid SELinux identifiers.
    Identifiers in the form $N (where N is an integer) are reserved as
    interface parameters and are treated as wild cards in many
    circumstances.

    Properties:
     .src_type - The source type allowed access. [String or None]
     .tgt_type - The target type to which access is allowed. [String or None]
     .obj_class - The object class to which access is allowed. [String or None]
     .perms - The permissions allowed to the object class. [IdSet]
     .audit_msgs - The audit messages that generated this access vector [List of strings]
     .xperms - Extended permissions attached to the AV. [Dictionary {operation: xperm set}]
    """
    def __init__(self, init_list=None):
        if init_list:
            self.from_list(init_list)
        else:
            self.src_type = None
            self.tgt_type = None
            self.obj_class = None
            self.perms = refpolicy.IdSet()

        self.audit_msgs = []
        self.type = audit2why.TERULE
        self.data = []
        self.xperms = {}
        # when implementing __eq__ also __hash__ is needed on py2
        # if object is muttable __hash__ should be None
        self.__hash__ = None

        # The direction of the information flow represented by this
        # access vector - used for matching
        self.info_flow_dir = None

    def from_list(self, list):
        """Initialize an access vector from a list.

        Initialize an access vector from a list treating the list as
        positional arguments - i.e., 0 = src_type, 1 = tgt_type, etc.
        All of the list elements 3 and greater are treated as perms.
        For example, the list ['foo_t', 'bar_t', 'file', 'read', 'write']
        would create an access vector list with the source type 'foo_t',
        target type 'bar_t', object class 'file', and permissions 'read'
        and 'write'.

        This format is useful for very simple storage to strings or disc
        (see to_list) and for initializing access vectors.
        """
        if len(list) < 4:
            raise ValueError("List must contain at least four elements %s" % str(list))
        self.src_type = list[0]
        self.tgt_type = list[1]
        self.obj_class = list[2]
        self.perms = refpolicy.IdSet(list[3:])

    def to_list(self):
        """
        Convert an access vector to a list.

        Convert an access vector to a list treating the list as positional
        values. See from_list for more information on how an access vector
        is represented in a list.
        """
        l = [self.src_type, self.tgt_type, self.obj_class]
        l.extend(sorted(self.perms))
        return l

    def merge(self, av):
        """Add permissions and extended permissions from AV"""
        self.perms.update(av.perms)

        for op in av.xperms:
            if op not in self.xperms:
                self.xperms[op] = refpolicy.XpermSet()
            self.xperms[op].extend(av.xperms[op])

    def __str__(self):
        return self.to_string()

    def to_string(self):
        return "allow %s %s:%s %s;" % (self.src_type, self.tgt_type,
                                        self.obj_class, self.perms.to_space_str())

    def _compare(self, other, method):
        try:
            x = list(self.perms)
            a = (self.src_type, self.tgt_type, self.obj_class, x)
            y = list(other.perms)
            x.sort()
            y.sort()
            b = (other.src_type, other.tgt_type, other.obj_class, y)
            return method(a, b)
        except (AttributeError, TypeError):
            # trying to compare to foreign type
            return NotImplemented


def avrule_to_access_vectors(avrule):
    """Convert an avrule into a list of access vectors.

    AccessVectors and AVRules are similary, but differ in that
    an AVRule can more than one source type, target type, and
    object class. This function expands a single avrule into a
    list of one or more AccessVectors representing the access
    defined in the AVRule.

    
    """
    if isinstance(avrule, AccessVector):
        return [avrule]
    a = []
    for src_type in avrule.src_types:
        for tgt_type in avrule.tgt_types:
            for obj_class in avrule.obj_classes:
                access = AccessVector()
                access.src_type = src_type
                access.tgt_type = tgt_type
                access.obj_class = obj_class
                access.perms = avrule.perms.copy()
                a.append(access)
    return a

class AccessVectorSet:
    """A non-overlapping set of access vectors.

    An AccessVectorSet is designed to store one or more access vectors
    that are non-overlapping. Access can be added to the set
    incrementally and access vectors will be added or merged as
    necessary.  For example, adding the following access vectors using
    add_av:
       allow $1 etc_t : read;
       allow $1 etc_t : write;
       allow $1 var_log_t : read;
    Would result in an access vector set with the access vectors:
       allow $1 etc_t : { read write};
       allow $1 var_log_t : read;
    """
    def __init__(self):
        """Initialize an access vector set.
        """
        self.src = {}
        # The information flow direction of this access vector
        # set - see objectmodel.py for more information. This
        # stored here to speed up searching - see matching.py.
        self.info_dir = None

    def __iter__(self):
        """Iterate over all of the unique access vectors in the set."""
        for tgts in self.src.values():
            for objs in tgts.values():
                for av in objs.values():
                    yield av

    def __len__(self):
        """Return the number of unique access vectors in the set.

        Because of the inernal representation of the access vector set,
        __len__ is not a constant time operation. Worst case is O(N)
        where N is the number of unique access vectors, but the common
        case is probably better.
        """
        l = 0
        for tgts in self.src.values():
            for objs in tgts.values():
               l += len(objs)
        return l

    def to_list(self):
        """Return the unique access vectors in the set as a list.

        The format of the returned list is a set of nested lists,
        each access vector represented by a list. This format is
        designed to be simply  serializable to a file.

        For example, consider an access vector set with the following
        access vectors:
          allow $1 user_t : file read;
          allow $1 etc_t : file { read write};
        to_list would return the following:
          [[$1, user_t, file, read]
           [$1, etc_t, file, read, write]]

        See AccessVector.to_list for more information.
        """
        l = []
        for av in self:
            l.append(av.to_list())

        return l

    def from_list(self, l):
        """Add access vectors stored in a list.

        See to list for more information on the list format that this
        method accepts.

        This will add all of the access from the list. Any existing
        access vectors in the set will be retained.
        """
        for av in l:
            self.add_av(AccessVector(av))

    def add(self, src_type, tgt_type, obj_class, perms, audit_msg=None, avc_type=audit2why.TERULE, data=[]):
        """Add an access vector to the set.
        """
        av = AccessVector()
        av.src_type = src_type
        av.tgt_type = tgt_type
        av.obj_class = obj_class
        av.perms = perms
        av.data = data
        av.type = avc_type

        self.add_av(av, audit_msg)

    def add_av(self, av, audit_msg=None):
        """Add an access vector to the set."""
        tgt = self.src.setdefault(av.src_type, { })
        cls = tgt.setdefault(av.tgt_type, { })

        if (av.obj_class, av.type) in cls:
            cls[av.obj_class, av.type].merge(av)
        else:
            cls[av.obj_class, av.type] = av

        if audit_msg:
            cls[av.obj_class, av.type].audit_msgs.append(audit_msg)

def avs_extract_types(avs):
    types = refpolicy.IdSet()
    for av in avs:
        types.add(av.src_type)
        types.add(av.tgt_type)
        
    return types

def avs_extract_obj_perms(avs):
    perms = { }
    for av in avs:
        if av.obj_class in perms:
            s = perms[av.obj_class]
        else:
            s = refpolicy.IdSet()
            perms[av.obj_class] = s
        s.update(av.perms)
    return perms

class RoleTypeSet:
    """A non-overlapping set of role type statements.

    This clas allows the incremental addition of role type statements and
    maintains a non-overlapping list of statements.
    """
    def __init__(self):
        """Initialize an access vector set."""
        self.role_types = {}

    def __iter__(self):
        """Iterate over all of the unique role allows statements in the set."""
        for role_type in self.role_types.values():
            yield role_type

    def __len__(self):
        """Return the unique number of role allow statements."""
        return len(self.role_types.keys())

    def add(self, role, type):
        if role in self.role_types:
            role_type = self.role_types[role]
        else:
            role_type = refpolicy.RoleType()
            role_type.role = role
            self.role_types[role] = role_type

        role_type.types.add(type)
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

"""
This module provides knowledge object classes and permissions. It should
be used to keep this knowledge from leaking into the more generic parts of
the policy generation.
"""

# Objects that can be implicitly typed - these objects do
# not _have_ to be implicitly typed (e.g., sockets can be
# explicitly labeled), but they often are.
#
# File is in this list for /proc/self
#
# This list is useful when dealing with rules that have a
# type (or param) used as both a subject and object. For
# example:
#
#   allow httpd_t httpd_t : socket read;
#
# This rule makes sense because the socket was (presumably) created
# by a process with the type httpd_t.
implicitly_typed_objects = ["socket", "fd", "process", "file", "lnk_file", "fifo_file",
                            "dbus", "capability", "unix_stream_socket"]

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#Information Flow
#
# All of the permissions in SELinux can be described in terms of
# information flow. For example, a read of a file is a flow of
# information from that file to the process reading. Viewing
# permissions in these terms can be used to model a varity of
# security properties.
#
# Here we have some infrastructure for understanding permissions
# in terms of information flow
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

# Information flow deals with information either flowing from a subject
# to and object ("write") or to a subject from an object ("read"). Read
# or write is described from the subject point-of-view. It is also possible
# for a permission to represent both a read and write (though the flow is
# typical asymettric in terms of bandwidth). It is also possible for
# permission to not flow information (meaning that the result is pure
# side-effect).
#
# The following constants are for representing the directionality
# of information flow.
FLOW_NONE  = 0
FLOW_READ  = 1
FLOW_WRITE = 2
FLOW_BOTH  = FLOW_READ | FLOW_WRITE

# These are used by the parser and for nice disply of the directions
str_to_dir = { "n" : FLOW_NONE, "r" : FLOW_READ, "w" : FLOW_WRITE, "b" : FLOW_BOTH }
dir_to_str = { FLOW_NONE : "n", FLOW_READ : "r", FLOW_WRITE : "w", FLOW_BOTH : "b" }

class PermMap:
    """A mapping between a permission and its information flow properties.

    PermMap represents the information flow properties of a single permission
    including the direction (read, write, etc.) and an abstract representation
    of the bandwidth of the flow (weight).
    """
    def __init__(self, perm, dir, weight):
        self.perm = perm
        self.dir = dir
        self.weight = weight

    def __repr__(self):
        return "<sepolgen.objectmodel.PermMap %s %s %d>" % (self.perm,
                                                           dir_to_str[self.dir],
                                                           self.weight)

class PermMappings:
    """The information flow properties of a set of object classes and permissions.

    PermMappings maps one or more classes and permissions to their PermMap objects
    describing their information flow charecteristics.
    """
    def __init__(self):
        self.classes = { }
        self.default_weight = 5
        self.default_dir = FLOW_BOTH

    def from_file(self, fd):
        """Read the permission mappings from a file. This reads the format used
        by Apol in the setools suite.
        """
        # This parsing is deliberitely picky and bails at the least error. It
        # is assumed that the permission map file will be shipped as part
        # of sepolgen and not user modified, so this is a reasonable design
        # choice. If user supplied permission mappings are needed the parser
        # should be made a little more robust and give better error messages.
        cur = None
        for line in fd:
            fields = line.split()
            if len(fields) == 0 or len(fields) == 1 or fields[0] == "#":
                continue
            if fields[0] == "class":
                c = fields[1]
                if c in self.classes:
                    raise ValueError("duplicate class in perm map")
                self.classes[c] = { }
                cur = self.classes[c]
            else:
                if len(fields) != 3:
                    raise ValueError("error in object classs permissions")
                if cur is None:
                    raise ValueError("permission outside of class")
                pm = PermMap(fields[0], str_to_dir[fields[1]], int(fields[2]))
                cur[pm.perm] = pm

    def get(self, obj, perm):
        """Get the permission map for the object permission.

        Returns:
          PermMap representing the permission
        Raises:
          KeyError if the object or permission is not defined
        """
        return self.classes[obj][perm]

    def getdefault(self, obj, perm):
        """Get the permission map for the object permission or a default.

        getdefault is the same as get except that a default PermMap is
        returned if the object class or permission is not defined. The
        default is FLOW_BOTH with a weight of 5.
        """
        try:
            pm = self.classes[obj][perm]
        except KeyError:
            return PermMap(perm, self.default_dir, self.default_weight)
        return pm

    def getdefault_direction(self, obj, perms):
        dir = FLOW_NONE
        for perm in perms:
            pm = self.getdefault(obj, perm)
            dir = dir | pm.dir
        return dir

    def getdefault_distance(self, obj, perms):
        total = 0
        for perm in perms:
            pm = self.getdefault(obj, perm)
            total += pm.weight

        return total



# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

"""
Classes and algorithms for matching requested access to access vectors.
"""

import itertools

from . import access
from . import objectmodel
from . import util


class Match(util.Comparison):
    def __init__(self, interface=None, dist=0):
        self.interface = interface
        self.dist = dist
        self.info_dir_change = False
        # when implementing __eq__ also __hash__ is needed on py2
        # if object is muttable __hash__ should be None
        self.__hash__ = None

    def _compare(self, other, method):
        try:
            a = (self.dist, self.info_dir_change)
            b = (other.dist, other.info_dir_change)
            return method(a, b)
        except (AttributeError, TypeError):
            # trying to compare to foreign type
            return NotImplemented

class MatchList:
    DEFAULT_THRESHOLD = 150
    def __init__(self):
        # Match objects that pass the threshold
        self.children = []
        # Match objects over the threshold
        self.bastards = []
        self.threshold = self.DEFAULT_THRESHOLD
        self.allow_info_dir_change = False
        self.av = None

    def best(self):
        if len(self.children):
            return self.children[0]
        if len(self.bastards):
            return self.bastards[0]
        return None

    def __len__(self):
        # Only return the length of the matches so
        # that this can be used to test if there is
        # a match.
        return len(self.children) + len(self.bastards)

    def __iter__(self):
        return iter(self.children)

    def all(self):
        return itertools.chain(self.children, self.bastards)

    def append(self, match):
        if match.dist <= self.threshold:
            if not match.info_dir_change or self.allow_info_dir_change:
                self.children.append(match)
            else:
                self.bastards.append(match)
        else:
            self.bastards.append(match)

    def sort(self):
        self.children.sort()
        self.bastards.sort()
                

class AccessMatcher:
    def __init__(self, perm_maps=None):
        self.type_penalty = 10
        self.obj_penalty = 10
        if perm_maps:
            self.perm_maps = perm_maps
        else:
            self.perm_maps = objectmodel.PermMappings()
        # We want a change in the information flow direction
        # to be a strong penalty - stronger than access to
        # a few unrelated types.
        self.info_dir_penalty = 100

    def type_distance(self, a, b):
        if a == b or access.is_idparam(b):
            return 0
        else:
            return -self.type_penalty


    def perm_distance(self, av_req, av_prov):
        # First check that we have enough perms
        diff = av_req.perms.difference(av_prov.perms)

        if len(diff) != 0:
            total = self.perm_maps.getdefault_distance(av_req.obj_class, diff)
            return -total
        else:
            diff = av_prov.perms.difference(av_req.perms)
            return self.perm_maps.getdefault_distance(av_req.obj_class, diff)

    def av_distance(self, req, prov):
        """Determine the 'distance' between 2 access vectors.

        This function is used to find an access vector that matches
        a 'required' access. To do this we comput a signed numeric
        value that indicates how close the req access is to the
        'provided' access vector. The closer the value is to 0
        the closer the match, with 0 being an exact match.

        A value over 0 indicates that the prov access vector provides more
        access than the req (in practice, this means that the source type,
        target type, and object class is the same and the perms in prov is
        a superset of those in req.

        A value under 0 indicates that the prov access less - or unrelated
        - access to the req access. A different type or object class will
        result in a very low value.

        The values other than 0 should only be interpreted relative to
        one another - they have no exact meaning and are likely to
        change.

        Params:
          req - [AccessVector] The access that is required. This is the
                access being matched.
          prov - [AccessVector] The access provided. This is the potential
                 match that is being evaluated for req.
        Returns:
          0   : Exact match between the acess vectors.

          < 0 : The prov av does not provide all of the access in req.
                A smaller value indicates that the access is further.

          > 0 : The prov av provides more access than req. The larger
                the value the more access over req.
        """
        # FUTURE - this is _very_ expensive and probably needs some
        # thorough performance work. This version is meant to give
        # meaningful results relatively simply.
        dist = 0

        # Get the difference between the types. The addition is safe
        # here because type_distance only returns 0 or negative.
        dist += self.type_distance(req.src_type, prov.src_type)
        dist += self.type_distance(req.tgt_type, prov.tgt_type)

        # Object class distance
        if req.obj_class != prov.obj_class and not access.is_idparam(prov.obj_class):
            dist -= self.obj_penalty

        # Permission distance

        # If this av doesn't have a matching source type, target type, and object class
        # count all of the permissions against it. Otherwise determine the perm
        # distance and dir.
        if dist < 0:
            pdist = self.perm_maps.getdefault_distance(prov.obj_class, prov.perms)
        else:
            pdist = self.perm_distance(req, prov)

        # Combine the perm and other distance
        if dist < 0:
            if pdist < 0:
                return dist + pdist
            else:
                return dist - pdist
        elif dist >= 0:
            if pdist < 0:
                return pdist - dist
            else:
                return dist + pdist

    def av_set_match(self, av_set, av):
        """

        """
        dist = None

        # Get the distance for each access vector
        for x in av_set:
            tmp = self.av_distance(av, x)
            if dist is None:
                dist = tmp
            elif tmp >= 0:
                if dist >= 0:
                    dist += tmp
                else:
                    dist = tmp + -dist
            else:
                if dist < 0:
                    dist += tmp
                else:
                    dist -= tmp

        # Penalize for information flow - we want to prevent the
        # addition of a write if the requested is read none. We are
        # much less concerned about the reverse.
        av_dir = self.perm_maps.getdefault_direction(av.obj_class, av.perms)

        if av_set.info_dir is None:
            av_set.info_dir = objectmodel.FLOW_NONE
            for x in av_set:
                av_set.info_dir = av_set.info_dir | \
                                  self.perm_maps.getdefault_direction(x.obj_class, x.perms)
        if (av_dir & objectmodel.FLOW_WRITE == 0) and (av_set.info_dir & objectmodel.FLOW_WRITE):
            if dist < 0:
                dist -= self.info_dir_penalty
            else:
                dist += self.info_dir_penalty

        return dist

    def search_ifs(self, ifset, av, match_list):
        match_list.av = av
        for iv in itertools.chain(ifset.tgt_type_all,
                                  ifset.tgt_type_map.get(av.tgt_type, [])):
            if not iv.enabled:
                #print "iv %s not enabled" % iv.name
                continue

            dist = self.av_set_match(iv.access, av)
            if dist >= 0:
                m = Match(iv, dist)
                match_list.append(m)


        match_list.sort()


# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

"""
classes and algorithms for the generation of SELinux policy.
"""

import itertools
import textwrap

import selinux.audit2why as audit2why
try:
    from setools import *
except:
    pass

from . import refpolicy
from . import objectmodel
from . import access
from . import interfaces
from . import matching
from . import util
# Constants for the level of explanation from the generation
# routines
NO_EXPLANATION    = 0
SHORT_EXPLANATION = 1
LONG_EXPLANATION  = 2

class PolicyGenerator:
    """Generate a reference policy module from access vectors.

    PolicyGenerator generates a new reference policy module
    or updates an existing module based on requested access
    in the form of access vectors.

    It generates allow rules and optionally module require
    statements, reference policy interfaces, and extended
    permission access vector rules. By default only allow rules
    are generated. The methods .set_gen_refpol, .set_gen_requires
    and .set_gen_xperms turns on interface generation,
    requires generation, and xperms rules genration respectively.

    PolicyGenerator can also optionally add comments explaining
    why a particular access was allowed based on the audit
    messages that generated the access. The access vectors
    passed in must have the .audit_msgs field set correctly
    and .explain set to SHORT|LONG_EXPLANATION to enable this
    feature.

    The module created by PolicyGenerator can be passed to
    output.ModuleWriter to output a text representation.
    """
    def __init__(self, module=None):
        """Initialize a PolicyGenerator with an optional
        existing module.

        If the module paramater is not None then access
        will be added to the passed in module. Otherwise
        a new reference policy module will be created.
        """
        self.ifgen = None
        self.explain = NO_EXPLANATION
        self.gen_requires = False
        if module:
            self.module = module
        else:
            self.module = refpolicy.Module()

        self.dontaudit = False
        self.xperms = False

        self.domains = None
    def set_gen_refpol(self, if_set=None, perm_maps=None):
        """Set whether reference policy interfaces are generated.

        To turn on interface generation pass in an interface set
        to use for interface generation. To turn off interface
        generation pass in None.

        If interface generation is enabled requires generation
        will also be enabled.
        """
        if if_set:
            self.ifgen = InterfaceGenerator(if_set, perm_maps)
            self.gen_requires = True
        else:
            self.ifgen = None
        self.__set_module_style()


    def set_gen_requires(self, status=True):
        """Set whether module requires are generated.

        Passing in true will turn on requires generation and
        False will disable generation. If requires generation is
        disabled interface generation will also be disabled and
        can only be re-enabled via .set_gen_refpol.
        """
        self.gen_requires = status

    def set_gen_explain(self, explain=SHORT_EXPLANATION):
        """Set whether access is explained.
        """
        self.explain = explain

    def set_gen_dontaudit(self, dontaudit):
        self.dontaudit = dontaudit

    def set_gen_xperms(self, xperms):
        """Set whether extended permission access vector rules
        are generated.
        """
        self.xperms = xperms

    def __set_module_style(self):
        if self.ifgen:
            refpolicy = True
        else:
            refpolicy = False
        for mod in self.module.module_declarations():
            mod.refpolicy = refpolicy

    def set_module_name(self, name, version="1.0"):
        """Set the name of the module and optionally the version.
        """
        # find an existing module declaration
        m = None
        for mod in self.module.module_declarations():
            m = mod
        if not m:
            m = refpolicy.ModuleDeclaration()
            self.module.children.insert(0, m)
        m.name = name
        m.version = version
        if self.ifgen:
            m.refpolicy = True
        else:
            m.refpolicy = False

    def get_module(self):
        # Generate the requires
        if self.gen_requires:
            gen_requires(self.module)

        """Return the generated module"""
        return self.module

    def __add_av_rule(self, av):
        """Add access vector rule.
        """
        rule = refpolicy.AVRule(av)

        if self.dontaudit:
            rule.rule_type = rule.DONTAUDIT
        rule.comment = ""
        if self.explain:
            rule.comment = str(refpolicy.Comment(explain_access(av, verbosity=self.explain)))

        if av.type == audit2why.ALLOW:
            rule.comment += "\n#!!!! This avc is allowed in the current policy"

            if av.xperms:
                rule.comment += "\n#!!!! This av rule may have been overridden by an extended permission av rule"

        if av.type == audit2why.DONTAUDIT:
            rule.comment += "\n#!!!! This avc has a dontaudit rule in the current policy"

        if av.type == audit2why.BOOLEAN:
            if len(av.data) > 1:
                rule.comment += "\n#!!!! This avc can be allowed using one of the these booleans:\n#     %s" % ", ".join([x[0] for x in av.data])
            else:
                rule.comment += "\n#!!!! This avc can be allowed using the boolean '%s'" % av.data[0][0]

        if av.type == audit2why.CONSTRAINT:
            rule.comment += "\n#!!!! This avc is a constraint violation.  You would need to modify the attributes of either the source or target types to allow this access."
            rule.comment += "\n#Constraint rule: "
            rule.comment += "\n#\t" + av.data[0]
            for reason in av.data[1:]:
                rule.comment += "\n#\tPossible cause is the source %s and target %s are different." % reason

        try:
            if ( av.type == audit2why.TERULE and
                 "write" in av.perms and
                 ( "dir" in av.obj_class or "open" in av.perms )):
                if not self.domains:
                    self.domains = seinfo(ATTRIBUTE, name="domain")[0]["types"]
                types=[]

                for i in [x[TCONTEXT] for x in sesearch([ALLOW], {SCONTEXT: av.src_type, CLASS: av.obj_class, PERMS: av.perms})]:
                    if i not in self.domains:
                        types.append(i)
                if len(types) == 1:
                    rule.comment += "\n#!!!! The source type '%s' can write to a '%s' of the following type:\n# %s\n" % ( av.src_type, av.obj_class, ", ".join(types))
                elif len(types) >= 1:
                    rule.comment += "\n#!!!! The source type '%s' can write to a '%s' of the following types:\n# %s\n" % ( av.src_type, av.obj_class, ", ".join(types))
        except:
            pass

        self.module.children.append(rule)

    def __add_ext_av_rules(self, av):
        """Add extended permission access vector rules.
        """
        for op in av.xperms.keys():
            extrule = refpolicy.AVExtRule(av, op)

            if self.dontaudit:
                extrule.rule_type = extrule.DONTAUDITXPERM

            self.module.children.append(extrule)

    def add_access(self, av_set):
        """Add the access from the access vector set to this
        module.
        """
        # Use the interface generator to split the access
        # into raw allow rules and interfaces. After this
        # a will contain a list of access that should be
        # used as raw allow rules and the interfaces will
        # be added to the module.
        if self.ifgen:
            raw_allow, ifcalls = self.ifgen.gen(av_set, self.explain)
            self.module.children.extend(ifcalls)
        else:
            raw_allow = av_set

        # Generate the raw allow rules from the filtered list
        for av in raw_allow:
            self.__add_av_rule(av)
            if self.xperms and av.xperms:
                self.__add_ext_av_rules(av)

    def add_role_types(self, role_type_set):
        for role_type in role_type_set:
            self.module.children.append(role_type)

def explain_access(av, ml=None, verbosity=SHORT_EXPLANATION):
    """Explain why a policy statement was generated.

    Return a string containing a text explanation of
    why a policy statement was generated. The string is
    commented and wrapped and can be directly inserted
    into a policy.

    Params:
      av - access vector representing the access. Should
       have .audit_msgs set appropriately.
      verbosity - the amount of explanation provided. Should
       be set to NO_EXPLANATION, SHORT_EXPLANATION, or
       LONG_EXPLANATION.
    Returns:
      list of strings - strings explaining the access or an empty
       string if verbosity=NO_EXPLANATION or there is not sufficient
       information to provide an explanation.
    """
    s = []

    def explain_interfaces():
        if not ml:
            return
        s.append(" Interface options:")
        for match in ml.all():
            ifcall = call_interface(match.interface, ml.av)
            s.append('   %s # [%d]' % (ifcall.to_string(), match.dist))


    # Format the raw audit data to explain why the
    # access was requested - either long or short.
    if verbosity == LONG_EXPLANATION:
        for msg in av.audit_msgs:
            s.append(' %s' % msg.header)
            s.append('  scontext="%s" tcontext="%s"' %
                     (str(msg.scontext), str(msg.tcontext)))
            s.append('  class="%s" perms="%s"' %
                     (msg.tclass, refpolicy.list_to_space_str(msg.accesses)))
            s.append('  comm="%s" exe="%s" path="%s"' % (msg.comm, msg.exe, msg.path))
            s.extend(textwrap.wrap('message="' + msg.message + '"', 80, initial_indent="  ",
                                   subsequent_indent="   "))
        explain_interfaces()
    elif verbosity:
        s.append(' src="%s" tgt="%s" class="%s", perms="%s"' %
                 (av.src_type, av.tgt_type, av.obj_class, av.perms.to_space_str()))
        # For the short display we are only going to use the additional information
        # from the first audit message. For the vast majority of cases this info
        # will always be the same anyway.
        if len(av.audit_msgs) > 0:
            msg = av.audit_msgs[0]
            s.append(' comm="%s" exe="%s" path="%s"' % (msg.comm, msg.exe, msg.path))
        explain_interfaces()
    return s

def call_interface(interface, av):
    params = []
    args = []

    params.extend(interface.params.values())
    params.sort(key=lambda param: param.num, reverse=True)

    ifcall = refpolicy.InterfaceCall()
    ifcall.ifname = interface.name

    for i in range(len(params)):
        if params[i].type == refpolicy.SRC_TYPE:
            ifcall.args.append(av.src_type)
        elif params[i].type == refpolicy.TGT_TYPE:
            ifcall.args.append(av.tgt_type)
        elif params[i].type == refpolicy.OBJ_CLASS:
            ifcall.args.append(av.obj_class)
        else:
            print(params[i].type)
            assert(0)

    assert(len(ifcall.args) > 0)

    return ifcall

class InterfaceGenerator:
    def __init__(self, ifs, perm_maps=None):
        self.ifs = ifs
        self.hack_check_ifs(ifs)
        self.matcher = matching.AccessMatcher(perm_maps)
        self.calls = []

    def hack_check_ifs(self, ifs):
        # FIXME: Disable interfaces we can't call - this is a hack.
        # Because we don't handle roles, multiple paramaters, etc.,
        # etc., we must make certain we can actually use a returned
        # interface.
        for x in ifs.interfaces.values():
            params = []
            params.extend(x.params.values())
            params.sort(key=lambda param: param.num, reverse=True)
            for i in range(len(params)):
                # Check that the paramater position matches
                # the number (e.g., $1 is the first arg). This
                # will fail if the parser missed something.
                if (i + 1) != params[i].num:
                    x.enabled = False
                    break
                # Check that we can handle the param type (currently excludes
                # roles.
                if params[i].type not in [refpolicy.SRC_TYPE, refpolicy.TGT_TYPE,
                                          refpolicy.OBJ_CLASS]:
                    x.enabled = False
                    break

    def gen(self, avs, verbosity):
        raw_av = self.match(avs)
        ifcalls = []
        for ml in self.calls:
            ifcall = call_interface(ml.best().interface, ml.av)
            if verbosity:
                ifcall.comment = refpolicy.Comment(explain_access(ml.av, ml, verbosity))
            ifcalls.append((ifcall, ml))

        d = []
        for ifcall, ifs in ifcalls:
            found = False
            for o_ifcall in d:
                if o_ifcall.matches(ifcall):
                    if o_ifcall.comment and ifcall.comment:
                        o_ifcall.comment.merge(ifcall.comment)
                    found = True
            if not found:
                d.append(ifcall)

        return (raw_av, d)


    def match(self, avs):
        raw_av = []
        for av in avs:
            ans = matching.MatchList()
            self.matcher.search_ifs(self.ifs, av, ans)
            if len(ans):
                self.calls.append(ans)
            else:
                raw_av.append(av)

        return raw_av


def gen_requires(module):
    """Add require statements to the module.
    """
    def collect_requires(node):
        r = refpolicy.Require()
        for avrule in node.avrules():
            r.types.update(avrule.src_types)
            r.types.update(avrule.tgt_types)
            for obj in avrule.obj_classes:
                r.add_obj_class(obj, avrule.perms)

        for ifcall in node.interface_calls():
            for arg in ifcall.args:
                # FIXME - handle non-type arguments when we
                # can actually figure those out.
                r.types.add(arg)

        for role_type in node.role_types():
            r.roles.add(role_type.role)
            r.types.update(role_type.types)
                
        r.types.discard("self")

        node.children.insert(0, r)

    # FUTURE - this is untested on modules with any sort of
    # nesting
    for node in module.nodes():
        collect_requires(node)


# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006-2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

# OVERVIEW
#
#
# This is a parser for the refpolicy policy "language" - i.e., the
# normal SELinux policy language plus the refpolicy style M4 macro
# constructs on top of that base language. This parser is primarily
# aimed at parsing the policy headers in order to create an abstract
# policy representation suitable for generating policy.
#
# Both the lexer and parser are included in this file. The are implemented
# using the Ply library (included with sepolgen).

import sys
import os
import re
import traceback

from . import access
from . import defaults
from . import lex
from . import refpolicy
from . import yacc

# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# lexer
#
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

tokens = (
    # basic tokens, punctuation
    'TICK',
    'SQUOTE',
    'OBRACE',
    'CBRACE',
    'SEMI',
    'COLON',
    'OPAREN',
    'CPAREN',
    'COMMA',
    'MINUS',
    'TILDE',
    'ASTERISK',
    'AMP',
    'BAR',
    'EXPL',
    'EQUAL',
    'FILENAME',
    'IDENTIFIER',
    'NUMBER',
    'PATH',
    'IPV6_ADDR',
    # reserved words
    #   module
    'MODULE',
    'POLICY_MODULE',
    'REQUIRE',
    #   flask
    'SID',
    'GENFSCON',
    'FS_USE_XATTR',
    'FS_USE_TRANS',
    'FS_USE_TASK',
    'PORTCON',
    'NODECON',
    'NETIFCON',
    'PIRQCON',
    'IOMEMCON',
    'IOPORTCON',
    'PCIDEVICECON',
    'DEVICETREECON',
    #   object classes
    'CLASS',
    #   types and attributes
    'TYPEATTRIBUTE',
    'ROLEATTRIBUTE',
    'TYPE',
    'ATTRIBUTE',
    'ATTRIBUTE_ROLE',
    'ALIAS',
    'TYPEALIAS',
    #   conditional policy
    'BOOL',
    'TRUE',
    'FALSE',
    'IF',
    'ELSE',
    #   users and roles
    'ROLE',
    'TYPES',
    #   rules
    'ALLOW',
    'DONTAUDIT',
    'AUDITALLOW',
    'NEVERALLOW',
    'PERMISSIVE',
    'TYPEBOUNDS',
    'TYPE_TRANSITION',
    'TYPE_CHANGE',
    'TYPE_MEMBER',
    'RANGE_TRANSITION',
    'ROLE_TRANSITION',
    #   refpolicy keywords
    'OPT_POLICY',
    'INTERFACE',
    'TUNABLE_POLICY',
    'GEN_REQ',
    'TEMPLATE',
    'GEN_CONTEXT',
    #   m4
    'IFELSE',
    'IFDEF',
    'IFNDEF',
    'DEFINE'
    )

# All reserved keywords - see t_IDENTIFIER for how these are matched in
# the lexer.
reserved = {
    # module
    'module' : 'MODULE',
    'policy_module' : 'POLICY_MODULE',
    'require' : 'REQUIRE',
    # flask
    'sid' : 'SID',
    'genfscon' : 'GENFSCON',
    'fs_use_xattr' : 'FS_USE_XATTR',
    'fs_use_trans' : 'FS_USE_TRANS',
    'fs_use_task' : 'FS_USE_TASK',
    'portcon' : 'PORTCON',
    'nodecon' : 'NODECON',
    'netifcon' : 'NETIFCON',
    'pirqcon' : 'PIRQCON',
    'iomemcon' : 'IOMEMCON',
    'ioportcon' : 'IOPORTCON',
    'pcidevicecon' : 'PCIDEVICECON',
    'devicetreecon' : 'DEVICETREECON',
    # object classes
    'class' : 'CLASS',
    # types and attributes
    'typeattribute' : 'TYPEATTRIBUTE',
    'roleattribute' : 'ROLEATTRIBUTE',
    'type' : 'TYPE',
    'attribute' : 'ATTRIBUTE',
    'attribute_role' : 'ATTRIBUTE_ROLE',
    'alias' : 'ALIAS',
    'typealias' : 'TYPEALIAS',
    # conditional policy
    'bool' : 'BOOL',
    'true' : 'TRUE',
    'false' : 'FALSE',
    'if' : 'IF',
    'else' : 'ELSE',
    # users and roles
    'role' : 'ROLE',
    'types' : 'TYPES',
    # rules
    'allow' : 'ALLOW',
    'dontaudit' : 'DONTAUDIT',
    'auditallow' : 'AUDITALLOW',
    'neverallow' : 'NEVERALLOW',
    'permissive' : 'PERMISSIVE',
    'typebounds' : 'TYPEBOUNDS',
    'type_transition' : 'TYPE_TRANSITION',
    'type_change' : 'TYPE_CHANGE',
    'type_member' : 'TYPE_MEMBER',
    'range_transition' : 'RANGE_TRANSITION',
    'role_transition' : 'ROLE_TRANSITION',
    # refpolicy keywords
    'optional_policy' : 'OPT_POLICY',
    'interface' : 'INTERFACE',
    'tunable_policy' : 'TUNABLE_POLICY',
    'gen_require' : 'GEN_REQ',
    'template' : 'TEMPLATE',
    'gen_context' : 'GEN_CONTEXT',
    # M4
    'ifelse' : 'IFELSE',
    'ifndef' : 'IFNDEF',
    'ifdef' : 'IFDEF',
    'define' : 'DEFINE'
    }

# The ply lexer allows definition of tokens in 2 ways: regular expressions
# or functions.

# Simple regex tokens
t_TICK      = r'\`'
t_SQUOTE    = r'\''
t_OBRACE    = r'\{'
t_CBRACE    = r'\}'
# This will handle spurios extra ';' via the +
t_SEMI      = r'\;+'
t_COLON     = r'\:'
t_OPAREN    = r'\('
t_CPAREN    = r'\)'
t_COMMA     = r'\,'
t_MINUS     = r'\-'
t_TILDE     = r'\~'
t_ASTERISK  = r'\*'
t_AMP       = r'\&'
t_BAR       = r'\|'
t_EXPL      = r'\!'
t_EQUAL     = r'\='
t_NUMBER    = r'[0-9\.]+'
t_PATH      = r'/[a-zA-Z0-9)_\.\*/\$]*'
#t_IPV6_ADDR = r'[a-fA-F0-9]{0,4}:[a-fA-F0-9]{0,4}:([a-fA-F0-9]{0,4}:)*'

# Ignore whitespace - this is a special token for ply that more efficiently
# ignores uninteresting tokens.
t_ignore    = " \t"

# More complex tokens
def t_IPV6_ADDR(t):
    r'[a-fA-F0-9]{0,4}:[a-fA-F0-9]{0,4}:([a-fA-F0-9]|:)*'
    # This is a function simply to force it sooner into
    # the regex list
    return t

def t_m4comment(t):
    r'dnl.*\n'
    # Ignore all comments
    t.lexer.lineno += 1

def t_refpolicywarn1(t):
    r'define.*refpolicywarn\(.*\n'
    # Ignore refpolicywarn statements - they sometimes
    # contain text that we can't parse.
    t.skip(1)

def t_refpolicywarn(t):
    r'refpolicywarn\(.*\n'
    # Ignore refpolicywarn statements - they sometimes
    # contain text that we can't parse.
    t.lexer.lineno += 1

def t_IDENTIFIER(t):
    r'[a-zA-Z_\$][a-zA-Z0-9_\-\+\.\$\*~]*'
    # Handle any keywords
    t.type = reserved.get(t.value,'IDENTIFIER')
    return t

def t_FILENAME(t):
    r'\"[a-zA-Z0-9_\-\+\.\$\*~ :]+\"'
    # Handle any keywords
    t.type = reserved.get(t.value,'FILENAME')
    return t

def t_comment(t):
    r'\#.*\n'
    # Ignore all comments
    t.lexer.lineno += 1

def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.skip(1)

def t_newline(t):
    r'\n+'
    t.lexer.lineno += len(t.value)

# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# Parser
#
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

# Global data used during parsing - making it global is easier than
# passing the state through the parsing functions.

#   m is the top-level data structure (stands for modules).
m = None
#   error is either None (indicating no error) or a string error message.
error = None
parse_file = ""
#   spt is the support macros (e.g., obj/perm sets) - it is an instance of
#     refpolicy.SupportMacros and should always be present during parsing
#     though it may not contain any macros.
spt = None
success = True

# utilities
def collect(stmts, parent, val=None):
    if stmts is None:
        return
    for s in stmts:
        if s is None:
            continue
        s.parent = parent
        if val is not None:
            parent.children.insert(0, (val, s))
        else:
            parent.children.insert(0, s)

def expand(ids, s):
    for id in ids:
        if spt.has_key(id):  # noqa
            s.update(spt.by_name(id))
        else:
            s.add(id)

# Top-level non-terminal
def p_statements(p):
    '''statements : statement
                  | statements statement
                  | empty
    '''
    if len(p) == 2 and p[1]:
        m.children.append(p[1])
    elif len(p) > 2 and p[2]:
        m.children.append(p[2])

def p_statement(p):
    '''statement : interface
                 | template
                 | obj_perm_set
                 | policy
                 | policy_module_stmt
                 | module_stmt
    '''
    p[0] = p[1]

def p_empty(p):
    'empty :'
    pass

#
# Reference policy language constructs
#

# This is for the policy module statement (e.g., policy_module(foo,1.2.0)).
# We have a separate terminal for either the basic language module statement
# and interface calls to make it easier to identifier.
def p_policy_module_stmt(p):
    'policy_module_stmt : POLICY_MODULE OPAREN IDENTIFIER COMMA NUMBER CPAREN'
    m = refpolicy.ModuleDeclaration()
    m.name = p[3]
    m.version = p[5]
    m.refpolicy = True
    p[0] = m

def p_interface(p):
    '''interface : INTERFACE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
    '''
    x = refpolicy.Interface(p[4])
    collect(p[8], x)
    p[0] = x

def p_template(p):
    '''template : TEMPLATE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
                | DEFINE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
    '''
    x = refpolicy.Template(p[4])
    collect(p[8], x)
    p[0] = x

def p_define(p):
    '''define : DEFINE OPAREN TICK IDENTIFIER SQUOTE CPAREN'''
    # This is for defining single M4 values (to be used later in ifdef statements).
    # Example: define(`sulogin_no_pam'). We don't currently do anything with these
    # but we should in the future when we correctly resolve ifdef statements.
    p[0] = None

def p_interface_stmts(p):
    '''interface_stmts : policy
                       | interface_stmts policy
                       | empty
    '''
    if len(p) == 2 and p[1]:
        p[0] = p[1]
    elif len(p) > 2:
        if not p[1]:
            if p[2]:
                p[0] = p[2]
        elif not p[2]:
            p[0] = p[1]
        else:
            p[0] = p[1] + p[2]

def p_optional_policy(p):
    '''optional_policy : OPT_POLICY OPAREN TICK interface_stmts SQUOTE CPAREN
                       | OPT_POLICY OPAREN TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
    '''
    o = refpolicy.OptionalPolicy()
    collect(p[4], o, val=True)
    if len(p) > 7:
        collect(p[8], o, val=False)
    p[0] = [o]

def p_tunable_policy(p):
    '''tunable_policy : TUNABLE_POLICY OPAREN TICK cond_expr SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
                      | TUNABLE_POLICY OPAREN TICK cond_expr SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
    '''
    x = refpolicy.TunablePolicy()
    x.cond_expr = p[4]
    collect(p[8], x, val=True)
    if len(p) > 11:
        collect(p[12], x, val=False)
    p[0] = [x]

def p_ifelse(p):
    '''ifelse : IFELSE OPAREN TICK IDENTIFIER SQUOTE COMMA COMMA TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
              | IFELSE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
              | IFELSE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
    '''
#    x = refpolicy.IfDef(p[4])
#    v = True
#    collect(p[8], x, val=v)
#    if len(p) > 12:
#        collect(p[12], x, val=False)
#    p[0] = [x]
    pass


def p_ifdef(p):
    '''ifdef : IFDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK statements SQUOTE CPAREN optional_semi
             | IFNDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK statements SQUOTE CPAREN optional_semi
             | IFDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK statements SQUOTE COMMA TICK statements SQUOTE CPAREN optional_semi
    '''
    x = refpolicy.IfDef(p[4])
    if p[1] == 'ifdef':
        v = True
    else:
        v = False
    collect(p[8], x, val=v)
    if len(p) > 12:
        collect(p[12], x, val=False)
    p[0] = [x]

def p_interface_call(p):
    '''interface_call : IDENTIFIER OPAREN interface_call_param_list CPAREN
                      | IDENTIFIER OPAREN CPAREN
                      | IDENTIFIER OPAREN interface_call_param_list CPAREN SEMI'''
    # Allow spurious semi-colons at the end of interface calls
    i = refpolicy.InterfaceCall(ifname=p[1])
    if len(p) > 4:
        i.args.extend(p[3])
    p[0] = i

def p_interface_call_param(p):
    '''interface_call_param : IDENTIFIER
                            | IDENTIFIER MINUS IDENTIFIER
                            | nested_id_set
                            | TRUE
                            | FALSE
                            | FILENAME
    '''
    # Intentionally let single identifiers pass through
    # List means set, non-list identifier
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = [p[1], "-" + p[3]]

def p_interface_call_param_list(p):
    '''interface_call_param_list : interface_call_param
                                 | interface_call_param_list COMMA interface_call_param
    '''
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1] + [p[3]]


def p_obj_perm_set(p):
    'obj_perm_set : DEFINE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK names SQUOTE CPAREN'
    s = refpolicy.ObjPermSet(p[4])
    s.perms = p[8]
    p[0] = s
    
#
# Basic SELinux policy language
#

def p_policy(p):
    '''policy : policy_stmt
              | optional_policy
              | tunable_policy
              | ifdef
              | ifelse
              | conditional
    '''
    p[0] = p[1]

def p_policy_stmt(p):
    '''policy_stmt : gen_require
                   | avrule_def
                   | typerule_def
                   | typebound_def
                   | typeattribute_def
                   | roleattribute_def
                   | interface_call
                   | role_def
                   | role_allow
                   | permissive
                   | type_def
                   | typealias_def
                   | attribute_def
                   | attribute_role_def
                   | range_transition_def
                   | role_transition_def
                   | bool
                   | define
                   | initial_sid
                   | genfscon
                   | fs_use
                   | portcon
                   | nodecon
                   | netifcon
                   | pirqcon
                   | iomemcon
                   | ioportcon
                   | pcidevicecon
                   | devicetreecon
    '''
    if p[1]:
        p[0] = [p[1]]

def p_module_stmt(p):
    'module_stmt : MODULE IDENTIFIER NUMBER SEMI'
    m = refpolicy.ModuleDeclaration()
    m.name = p[2]
    m.version = p[3]
    m.refpolicy = False
    p[0] = m

def p_gen_require(p):
    '''gen_require : GEN_REQ OPAREN TICK requires SQUOTE CPAREN
                   | REQUIRE OBRACE requires CBRACE'''
    # We ignore the require statements - they are redundant data from our point-of-view.
    # Checkmodule will verify them later anyway so we just assume that they match what
    # is in the rest of the interface.
    pass

def p_requires(p):
    '''requires : require
                | requires require
                | ifdef
                | requires ifdef
    '''
    pass

def p_require(p):
    '''require : TYPE comma_list SEMI
               | ROLE comma_list SEMI
               | ATTRIBUTE comma_list SEMI
               | ATTRIBUTE_ROLE comma_list SEMI
               | CLASS comma_list SEMI
               | BOOL comma_list SEMI
    '''
    pass

def p_security_context(p):
    '''security_context : IDENTIFIER COLON IDENTIFIER COLON IDENTIFIER
                        | IDENTIFIER COLON IDENTIFIER COLON IDENTIFIER COLON mls_range_def'''
    # This will likely need some updates to handle complex levels
    s = refpolicy.SecurityContext()
    s.user = p[1]
    s.role = p[3]
    s.type = p[5]
    if len(p) > 6:
        s.level = p[7]

    p[0] = s

def p_gen_context(p):
    '''gen_context : GEN_CONTEXT OPAREN security_context COMMA mls_range_def CPAREN
    '''
    # We actually store gen_context statements in a SecurityContext
    # object - it knows how to output either a bare context or a
    # gen_context statement.
    s = p[3]
    s.level = p[5]
    
    p[0] = s

def p_context(p):
    '''context : security_context
               | gen_context
    '''
    p[0] = p[1]

def p_initial_sid(p):
    '''initial_sid : SID IDENTIFIER context'''
    s = refpolicy.InitialSid()
    s.name = p[2]
    s.context = p[3]
    p[0] = s

def p_genfscon(p):
    '''genfscon : GENFSCON IDENTIFIER PATH context'''
    
    g = refpolicy.GenfsCon()
    g.filesystem = p[2]
    g.path = p[3]
    g.context = p[4]

    p[0] = g

def p_fs_use(p):
    '''fs_use : FS_USE_XATTR IDENTIFIER context SEMI
              | FS_USE_TASK IDENTIFIER context SEMI
              | FS_USE_TRANS IDENTIFIER context SEMI
    '''
    f = refpolicy.FilesystemUse()
    if p[1] == "fs_use_xattr":
        f.type = refpolicy.FilesystemUse.XATTR
    elif p[1] == "fs_use_task":
        f.type = refpolicy.FilesystemUse.TASK
    elif p[1] == "fs_use_trans":
        f.type = refpolicy.FilesystemUse.TRANS

    f.filesystem = p[2]
    f.context = p[3]

    p[0] = f

def p_portcon(p):
    '''portcon : PORTCON IDENTIFIER NUMBER context
               | PORTCON IDENTIFIER NUMBER MINUS NUMBER context'''
    c = refpolicy.PortCon()
    c.port_type = p[2]
    if len(p) == 5:
        c.port_number = p[3]
        c.context = p[4]
    else:
        c.port_number = p[3] + "-" + p[4]
        c.context = p[5]

    p[0] = c

def p_nodecon(p):
    '''nodecon : NODECON NUMBER NUMBER context
               | NODECON IPV6_ADDR IPV6_ADDR context
    '''
    n = refpolicy.NodeCon()
    n.start = p[2]
    n.end = p[3]
    n.context = p[4]

    p[0] = n

def p_netifcon(p):
    'netifcon : NETIFCON IDENTIFIER context context'
    n = refpolicy.NetifCon()
    n.interface = p[2]
    n.interface_context = p[3]
    n.packet_context = p[4]

    p[0] = n

def p_pirqcon(p):
    'pirqcon : PIRQCON NUMBER context'
    c = refpolicy.PirqCon()
    c.pirq_number = p[2]
    c.context = p[3]

    p[0] = c

def p_iomemcon(p):
    '''iomemcon : IOMEMCON NUMBER context
                | IOMEMCON NUMBER MINUS NUMBER context'''
    c = refpolicy.IomemCon()
    if len(p) == 4:
        c.device_mem = p[2]
        c.context = p[3]
    else:
        c.device_mem = p[2] + "-" + p[3]
        c.context = p[4]

    p[0] = c

def p_ioportcon(p):
    '''ioportcon : IOPORTCON NUMBER context
                | IOPORTCON NUMBER MINUS NUMBER context'''
    c = refpolicy.IoportCon()
    if len(p) == 4:
        c.ioport = p[2]
        c.context = p[3]
    else:
        c.ioport = p[2] + "-" + p[3]
        c.context = p[4]

    p[0] = c

def p_pcidevicecon(p):
    'pcidevicecon : PCIDEVICECON NUMBER context'
    c = refpolicy.PciDeviceCon()
    c.device = p[2]
    c.context = p[3]

    p[0] = c

def p_devicetreecon(p):
    'devicetreecon : DEVICETREECON NUMBER context'
    c = refpolicy.DevicetTeeCon()
    c.path = p[2]
    c.context = p[3]

    p[0] = c

def p_mls_range_def(p):
    '''mls_range_def : mls_level_def MINUS mls_level_def
                     | mls_level_def
    '''
    p[0] = p[1]
    if len(p) > 2:
        p[0] = p[0] + "-" + p[3]

def p_mls_level_def(p):
    '''mls_level_def : IDENTIFIER COLON comma_list
                     | IDENTIFIER
    '''
    p[0] = p[1]
    if len(p) > 2:
        p[0] = p[0] + ":" + ",".join(p[3])
    
def p_type_def(p):
    '''type_def : TYPE IDENTIFIER COMMA comma_list SEMI
                | TYPE IDENTIFIER SEMI
                | TYPE IDENTIFIER ALIAS names SEMI
                | TYPE IDENTIFIER ALIAS names COMMA comma_list SEMI
    '''
    t = refpolicy.Type(p[2])
    if len(p) == 6:
        if p[3] == ',':
            t.attributes.update(p[4])
        else:
            t.aliases = p[4]
    elif len(p) > 4:
        t.aliases = p[4]
        if len(p) == 8:
            t.attributes.update(p[6])
    p[0] = t

def p_attribute_def(p):
    'attribute_def : ATTRIBUTE IDENTIFIER SEMI'
    a = refpolicy.Attribute(p[2])
    p[0] = a

def p_attribute_role_def(p):
    'attribute_role_def : ATTRIBUTE_ROLE IDENTIFIER SEMI'
    a = refpolicy.Attribute_Role(p[2])
    p[0] = a

def p_typealias_def(p):
    'typealias_def : TYPEALIAS IDENTIFIER ALIAS names SEMI'
    t = refpolicy.TypeAlias()
    t.type = p[2]
    t.aliases = p[4]
    p[0] = t

def p_role_def(p):
    '''role_def : ROLE IDENTIFIER TYPES comma_list SEMI
                | ROLE IDENTIFIER SEMI'''
    r = refpolicy.Role()
    r.role = p[2]
    if len(p) > 4:
        r.types.update(p[4])
    p[0] = r

def p_role_allow(p):
    'role_allow : ALLOW names names SEMI'
    r = refpolicy.RoleAllow()
    r.src_roles = p[2]
    r.tgt_roles = p[3]
    p[0] = r

def p_permissive(p):
    'permissive : PERMISSIVE names SEMI'
    pass

def p_avrule_def(p):
    '''avrule_def : ALLOW names names COLON names names SEMI
                  | DONTAUDIT names names COLON names names SEMI
                  | AUDITALLOW names names COLON names names SEMI
                  | NEVERALLOW names names COLON names names SEMI
    '''
    a = refpolicy.AVRule()
    if p[1] == 'dontaudit':
        a.rule_type = refpolicy.AVRule.DONTAUDIT
    elif p[1] == 'auditallow':
        a.rule_type = refpolicy.AVRule.AUDITALLOW
    elif p[1] == 'neverallow':
        a.rule_type = refpolicy.AVRule.NEVERALLOW
    a.src_types = p[2]
    a.tgt_types = p[3]
    a.obj_classes = p[5]
    a.perms = p[6]
    p[0] = a

def p_typerule_def(p):
    '''typerule_def : TYPE_TRANSITION names names COLON names IDENTIFIER SEMI
                    | TYPE_TRANSITION names names COLON names IDENTIFIER FILENAME SEMI
                    | TYPE_TRANSITION names names COLON names IDENTIFIER IDENTIFIER SEMI
                    | TYPE_CHANGE names names COLON names IDENTIFIER SEMI
                    | TYPE_MEMBER names names COLON names IDENTIFIER SEMI
    '''
    t = refpolicy.TypeRule()
    if p[1] == 'type_change':
        t.rule_type = refpolicy.TypeRule.TYPE_CHANGE
    elif p[1] == 'type_member':
        t.rule_type = refpolicy.TypeRule.TYPE_MEMBER
    t.src_types = p[2]
    t.tgt_types = p[3]
    t.obj_classes = p[5]
    t.dest_type = p[6]
    t.file_name = p[7]
    p[0] = t

def p_typebound_def(p):
    '''typebound_def : TYPEBOUNDS IDENTIFIER comma_list SEMI'''
    t = refpolicy.TypeBound()
    t.type = p[2]
    t.tgt_types.update(p[3])
    p[0] = t

def p_bool(p):
    '''bool : BOOL IDENTIFIER TRUE SEMI
            | BOOL IDENTIFIER FALSE SEMI'''
    b = refpolicy.Bool()
    b.name = p[2]
    if p[3] == "true":
        b.state = True
    else:
        b.state = False
    p[0] = b

def p_conditional(p):
    ''' conditional : IF OPAREN cond_expr CPAREN OBRACE interface_stmts CBRACE
                    | IF OPAREN cond_expr CPAREN OBRACE interface_stmts CBRACE ELSE OBRACE interface_stmts CBRACE
    '''
    c = refpolicy.Conditional()
    c.cond_expr = p[3]
    collect(p[6], c, val=True)
    if len(p) > 8:
        collect(p[10], c, val=False)
    p[0] = [c]

def p_typeattribute_def(p):
    '''typeattribute_def : TYPEATTRIBUTE IDENTIFIER comma_list SEMI'''
    t = refpolicy.TypeAttribute()
    t.type = p[2]
    t.attributes.update(p[3])
    p[0] = t

def p_roleattribute_def(p):
    '''roleattribute_def : ROLEATTRIBUTE IDENTIFIER comma_list SEMI'''
    t = refpolicy.RoleAttribute()
    t.role = p[2]
    t.roleattributes.update(p[3])
    p[0] = t

def p_range_transition_def(p):
    '''range_transition_def : RANGE_TRANSITION names names COLON names mls_range_def SEMI
                            | RANGE_TRANSITION names names names SEMI'''
    pass

def p_role_transition_def(p):
    '''role_transition_def : ROLE_TRANSITION names names names SEMI'''
    pass

def p_cond_expr(p):
    '''cond_expr : IDENTIFIER
                 | EXPL cond_expr
                 | cond_expr AMP AMP cond_expr
                 | cond_expr BAR BAR cond_expr
                 | cond_expr EQUAL EQUAL cond_expr
                 | cond_expr EXPL EQUAL cond_expr
    '''
    l = len(p)
    if l == 2:
        p[0] = [p[1]]
    elif l == 3:
        p[0] = [p[1]] + p[2]
    else:
        p[0] = p[1] + [p[2] + p[3]] + p[4]


#
# Basic terminals
#

# Identifiers and lists of identifiers. These must
# be handled somewhat gracefully. Names returns an IdSet and care must
# be taken that this is _assigned_ to an object to correctly update
# all of the flags (as opposed to using update). The other terminals
# return list - this is to preserve ordering if it is important for
# parsing (for example, interface_call must retain the ordering). Other
# times the list should be used to update an IdSet.

def p_names(p):
    '''names : identifier
             | nested_id_set
             | asterisk
             | TILDE identifier
             | TILDE nested_id_set
             | IDENTIFIER MINUS IDENTIFIER
    '''
    s = refpolicy.IdSet()
    if len(p) < 3:
        expand(p[1], s)
    elif len(p) == 3:
        expand(p[2], s)
        s.compliment = True
    else:
        expand([p[1]])
        s.add("-" + p[3])
    p[0] = s

def p_identifier(p):
    'identifier : IDENTIFIER'
    p[0] = [p[1]]

def p_asterisk(p):
    'asterisk : ASTERISK'
    p[0] = [p[1]]

def p_nested_id_set(p):
    '''nested_id_set : OBRACE nested_id_list CBRACE
    '''
    p[0] = p[2]

def p_nested_id_list(p):
    '''nested_id_list : nested_id_element
                      | nested_id_list nested_id_element
    '''
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = p[1] + p[2]

def p_nested_id_element(p):
    '''nested_id_element : identifier
                         | MINUS IDENTIFIER
                         | nested_id_set
    '''
    if len(p) == 2:
        p[0] = p[1]
    else:
        # For now just leave the '-'
        str = "-" + p[2]
        p[0] = [str]

def p_comma_list(p):
    '''comma_list : nested_id_list
                  | comma_list COMMA nested_id_list
    '''
    if len(p) > 2:
        p[1] = p[1] + p[3]
    p[0] = p[1]

def p_optional_semi(p):
    '''optional_semi : SEMI
                   | empty'''
    pass


#
# Interface to the parser
#

def p_error(tok):
    global error, parse_file, success, parser
    error = "%s: Syntax error on line %d %s [type=%s]" % (parse_file, tok.lineno, tok.value, tok.type)
    print(error)
    success = False

def prep_spt(spt):
    if not spt:
        return { }
    map = {}
    for x in spt:
        map[x.name] = x

parser = None
lexer = None
def create_globals(module, support, debug):
    global parser, lexer, m, spt

    if not parser:
        lexer = lex.lex()
        parser = yacc.yacc(method="LALR", debug=debug, write_tables=0)

    if module is not None:
        m = module
    else:
        m = refpolicy.Module()

    if not support:
        spt = refpolicy.SupportMacros()
    else:
        spt = support

def parse(text, module=None, support=None, debug=False):
    create_globals(module, support, debug)
    global error, parser, lexer, success

    lexer.lineno = 1
    success = True

    try:
        parser.parse(text, debug=debug, lexer=lexer)
    except Exception as e:
        parser = None
        lexer = None
        error = "internal parser error: %s" % str(e) + "\n" + traceback.format_exc()

    if not success:
        # force the parser and lexer to be rebuilt - we have some problems otherwise
        parser = None
        msg = 'could not parse text: "%s"' % error
        raise ValueError(msg)
    return m

def list_headers(root):
    modules = []
    support_macros = None

    for dirpath, dirnames, filenames in os.walk(root):
        for name in filenames:
            modname = os.path.splitext(name)
            filename = os.path.join(dirpath, name)

            if modname[1] == '.spt':
                if name == "obj_perm_sets.spt":
                    support_macros = filename
                elif len(re.findall("patterns", modname[0])):
                    modules.append((modname[0], filename))
            elif modname[1] == '.if':
                modules.append((modname[0], filename))

    return (modules, support_macros)


def parse_headers(root, output=None, expand=True, debug=False):
    from . import util

    headers = refpolicy.Headers()

    modules = []
    support_macros = None

    if os.path.isfile(root):
        name = os.path.split(root)[1]
        if name == '':
            raise ValueError("Invalid file name %s" % root)
        modname = os.path.splitext(name)
        modules.append((modname[0], root))
        all_modules, support_macros = list_headers(defaults.headers())
    else:
        modules, support_macros = list_headers(root)

    if expand and not support_macros:
        raise ValueError("could not find support macros (obj_perm_sets.spt)")

    def o(msg):
        if output:
            output.write(msg)

    def parse_file(f, module, spt=None):
        global parse_file
        if debug:
            o("parsing file %s\n" % f)
        try:
            fd = open(f)
            txt = fd.read()
            fd.close()
            parse_file = f
            parse(txt, module, spt, debug)
        except IOError as e:
            return
        except ValueError as e:
            raise ValueError("error parsing file %s: %s" % (f, str(e)))

    spt = None
    if support_macros:
        o("Parsing support macros (%s): " % support_macros)
        spt = refpolicy.SupportMacros()
        parse_file(support_macros, spt)

        headers.children.append(spt)

        # FIXME: Total hack - add in can_exec rather than parse the insanity
        # of misc_macros. We are just going to pretend that this is an interface
        # to make the expansion work correctly.
        can_exec = refpolicy.Interface("can_exec")
        av = access.AccessVector(["$1","$2","file","execute_no_trans","open", "read",
                                  "getattr","lock","execute","ioctl"])

        can_exec.children.append(refpolicy.AVRule(av))
        headers.children.append(can_exec)

        o("done.\n")

    if output and not debug:
        status = util.ConsoleProgressBar(sys.stdout, steps=len(modules))
        status.start("Parsing interface files")

    failures = []
    for x in modules:
        m = refpolicy.Module()
        m.name = x[0]
        try:
            if expand:
                parse_file(x[1], m, spt)
            else:
                parse_file(x[1], m)
        except ValueError as e:
            o(str(e) + "\n")
            failures.append(x[1])
            continue

        headers.children.append(m)
        if output and not debug:
            status.step()

    if len(failures):
        o("failed to parse some headers: %s" % ", ".join(failures))

    return headers
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006 Red Hat 
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import locale
import sys


PY3 = sys.version_info[0] == 3

if PY3:
    bytes_type=bytes
    string_type=str
else:
    bytes_type=str
    string_type=unicode


class ConsoleProgressBar:
    def __init__(self, out, steps=100, indicator='#'):
        self.blocks = 0
        self.current = 0
        self.steps = steps
        self.indicator = indicator
        self.out = out
        self.done = False

    def start(self, message=None):
        self.done = False
        if message:
            self.out.write('\n%s:\n' % message)
        self.out.write('%--10---20---30---40---50---60---70---80---90--100\n')

    def step(self, n=1):
        self.current += n

        old = self.blocks
        self.blocks = int(round(self.current / float(self.steps) * 100) / 2)

        if self.blocks > 50:
            self.blocks = 50

        new = self.blocks - old

        self.out.write(self.indicator * new)
        self.out.flush()

        if self.blocks == 50 and not self.done:
            self.done = True
            self.out.write("\n")

def set_to_list(s):
    l = []
    l.extend(s)
    return l

def first(s, sorted=False):
    """
    Return the first element of a set.

    It sometimes useful to return the first element from a set but,
    because sets are not indexable, this is rather hard. This function
    will return the first element from a set. If sorted is True, then
    the set will first be sorted (making this an expensive operation).
    Otherwise a random element will be returned (as sets are not ordered).
    """
    if not len(s):
        raise IndexError("empty containter")
    
    if sorted:
        l = set_to_list(s)
        l.sort()
        return l[0]
    else:
        for x in s:
            return x

def encode_input(text):
    """Encode given text via preferred system encoding"""
    # locale will often find out the correct encoding
    encoding = locale.getpreferredencoding()
    try:
        encoded_text = text.encode(encoding)
    except UnicodeError:
    # if it fails to find correct encoding then ascii is used
    # which may lead to UnicodeError if `text` contains non ascii signs
    # utf-8 is our guess to fix the situation
        encoded_text = text.encode('utf-8')
    return encoded_text

def decode_input(text):
    """Decode given text via preferred system encoding"""
    # locale will often find out the correct encoding
    encoding = locale.getpreferredencoding()
    try:
        decoded_text = text.decode(encoding)
    except UnicodeError:
    # if it fails to find correct encoding then ascii is used
    # which may lead to UnicodeError if `text` contains non ascii signs
    # utf-8 is our guess to fix the situation
        decoded_text = text.decode('utf-8')
    return decoded_text

class Comparison():
    """Class used when implementing rich comparison.

    Inherit from this class if you want to have a rich
    comparison withing the class, afterwards implement
    _compare function within your class."""

    def _compare(self, other, method):
        return NotImplemented

    def __eq__(self, other):
        return self._compare(other, lambda a, b: a == b)

    def __lt__(self, other):
        return self._compare(other, lambda a, b: a < b)

    def __le__(self, other):
        return self._compare(other, lambda a, b: a <= b)

    def __ge__(self, other):
        return self._compare(other, lambda a, b: a >= b)

    def __gt__(self, other):
        return self._compare(other, lambda a, b: a > b)

    def __ne__(self, other):
        return self._compare(other, lambda a, b: a != b)

if sys.version_info < (2,7):
    # cmp_to_key function is missing in python2.6
    def cmp_to_key(mycmp):
        'Convert a cmp= function into a key= function'
        class K:
            def __init__(self, obj, *args):
                self.obj = obj
            def __lt__(self, other):
                return mycmp(self.obj, other.obj) < 0
            def __gt__(self, other):
                return mycmp(self.obj, other.obj) > 0
            def __eq__(self, other):
                return mycmp(self.obj, other.obj) == 0
            def __le__(self, other):
                return mycmp(self.obj, other.obj) <= 0
            def __ge__(self, other):
                return mycmp(self.obj, other.obj) >= 0
            def __ne__(self, other):
                return mycmp(self.obj, other.obj) != 0
        return K
else:
    from functools import cmp_to_key

def cmp(first, second):
    return (first > second) - (second > first)

if __name__ == "__main__":
    import time
    p = ConsoleProgressBar(sys.stdout, steps=999)
    p.start("computing pi")
    for i in range(999):
        p.step()
        time.sleep(0.001)
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006 Red Hat 
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

"""
Utilities for dealing with the compilation of modules and creation
of module tress.
"""

import re
import tempfile
try:
    from subprocess import getstatusoutput
except ImportError:
    from commands import getstatusoutput
import os
import os.path
import shutil

import selinux

from . import defaults


def is_valid_name(modname):
    """Check that a module name is valid.
    """
    m = re.findall(r"[^a-zA-Z0-9_\-\.]", modname)
    if len(m) == 0 and modname[0].isalpha():
        return True
    else:
        return False

class ModuleTree:
    def __init__(self, modname):
        self.modname = modname
        self.dirname = None

    def dir_name(self):
        return self.dirname

    def te_name(self):
        return self.dirname + "/" + self.modname + ".te"

    def fc_name(self):
        return self.dirname + "/" + self.modname + ".fc"

    def if_name(self):
        return self.dirname + "/" + self.modname + ".if"

    def package_name(self):
        return self.dirname + "/" + self.modname + ".pp"

    def makefile_name(self):
        return self.dirname + "/Makefile"

    def create(self, parent_dirname, makefile_include=None):
        self.dirname = parent_dirname + "/" + self.modname
        os.mkdir(self.dirname)
        fd = open(self.makefile_name(), "w")
        if makefile_include:
            fd.write("include " + makefile_include)
        else:
            fd.write("include " + defaults.refpolicy_makefile())
        fd.close()

        # Create empty files for the standard refpolicy
        # module files
        open(self.te_name(), "w").close()
        open(self.fc_name(), "w").close()
        open(self.if_name(), "w").close()

def modname_from_sourcename(sourcename):
    return os.path.splitext(os.path.split(sourcename)[1])[0]

class ModuleCompiler:
    """ModuleCompiler eases running of the module compiler.

    The ModuleCompiler class encapsulates running the commandline
    module compiler (checkmodule) and module packager (semodule_package).
    You are likely interested in the create_module_package method.
    
    Several options are controlled via paramaters (only effects the 
    non-refpol builds):
    
     .mls          [boolean] Generate an MLS module (by passed -M to
                   checkmodule). True to generate an MLS module, false
                   otherwise.
                   
     .module       [boolean] Generate a module instead of a base module.
                   True to generate a module, false to generate a base.
                   
     .checkmodule  [string] Fully qualified path to the module compiler.
                   Default is /usr/bin/checkmodule.
                   
     .semodule_package [string] Fully qualified path to the module
                   packager. Defaults to /usr/bin/semodule_package.
     .output       [file object] File object used to write verbose
                   output of the compililation and packaging process.
    """
    def __init__(self, output=None):
        """Create a ModuleCompiler instance, optionally with an
        output file object for verbose output of the compilation process.
        """
        self.mls = selinux.is_selinux_mls_enabled()
        self.module = True
        self.checkmodule = "/usr/bin/checkmodule"
        self.semodule_package = "/usr/bin/semodule_package"
        self.output = output
        self.last_output = ""
        self.refpol_makefile = defaults.refpolicy_makefile()
        self.make = "/usr/bin/make"

    def o(self, str):
        if self.output:
            self.output.write(str + "\n")
        self.last_output = str

    def run(self, command):
        self.o(command)
        rc, output = getstatusoutput(command)
        self.o(output)
        
        return rc
    
    def gen_filenames(self, sourcename):
        """Generate the module and policy package filenames from
        a source file name. The source file must be in the form
        of "foo.te". This will generate "foo.mod" and "foo.pp".
        
        Returns a tuple with (modname, policypackage).
        """
        splitname = sourcename.split(".")
        if len(splitname) < 2:
            raise RuntimeError("invalid sourcefile name %s (must end in .te)", sourcename)
        # Handle other periods in the filename correctly
        basename = ".".join(splitname[0:-1])
        modname = basename + ".mod"
        packagename = basename + ".pp"
        
        return (modname, packagename)

    def create_module_package(self, sourcename, refpolicy=True):
        """Create a module package saved in a packagename from a
        sourcename.

        The create_module_package creates a module package saved in a
        file named sourcename (.pp is the standard extension) from a
        source file (.te is the standard extension). The source file
        should contain SELinux policy statements appropriate for a
        base or non-base module (depending on the setting of .module).

        Only file names are accepted, not open file objects or
        descriptors because the command line SELinux tools are used.

        On error a RuntimeError will be raised with a descriptive
        error message.
        """
        if refpolicy:
            self.refpol_build(sourcename)
        else:
            modname, packagename = self.gen_filenames(sourcename)
            self.compile(sourcename, modname)
            self.package(modname, packagename)
            os.unlink(modname)
            
    def refpol_build(self, sourcename):
        # Compile
        command = self.make + " -f " + self.refpol_makefile
        rc = self.run(command)

        # Raise an error if the process failed
        if rc != 0:
            raise RuntimeError("compilation failed:\n%s" % self.last_output)
        
    def compile(self, sourcename, modname):
        s = [self.checkmodule]
        if self.mls:
            s.append("-M")
        if self.module:
            s.append("-m")
        s.append("-o")
        s.append(modname)
        s.append(sourcename)

        rc = self.run(" ".join(s))
        if rc != 0:
            raise RuntimeError("compilation failed:\n%s" % self.last_output)

    def package(self, modname, packagename):
        s = [self.semodule_package]
        s.append("-o")
        s.append(packagename)
        s.append("-m")
        s.append(modname)
        
        rc = self.run(" ".join(s))
        if rc != 0:
            raise RuntimeError("packaging failed [%s]" % self.last_output)
        
    
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

import string
import selinux

# OVERVIEW
#
# This file contains objects and functions used to represent the reference
# policy (including the headers, M4 macros, and policy language statements).
#
# This representation is very different from the semantic representation
# used in libsepol. Instead, it is a more typical abstract representation
# used by the first stage of compilers. It is basically a parse tree.
#
# This choice is intentional as it allows us to handle the unprocessed
# M4 statements - including the $1 style arguments - and to more easily generate
# the data structures that we need for policy generation.
#

# Constans for referring to fields
SRC_TYPE  = 0
TGT_TYPE  = 1
OBJ_CLASS = 2
PERMS     = 3
ROLE      = 4
DEST_TYPE = 5

# String represenations of the above constants
field_to_str = ["source", "target", "object", "permission", "role", "destination" ]
str_to_field = { "source" : SRC_TYPE, "target" : TGT_TYPE, "object" : OBJ_CLASS,
                "permission" : PERMS, "role" : ROLE, "destination" : DEST_TYPE }

# Base Classes

class PolicyBase:
    def __init__(self, parent=None):
        self.parent = None
        self.comment = None

class Node(PolicyBase):
    """Base class objects produced from parsing the reference policy.

    The Node class is used as the base class for any non-leaf
    object produced by parsing the reference policy. This object
    should contain a reference to its parent (or None for a top-level
    object) and 0 or more children.

    The general idea here is to have a very simple tree structure. Children
    are not separated out by type. Instead the tree structure represents
    fairly closely the real structure of the policy statements.

    The object should be iterable - by default over all children but
    subclasses are free to provide additional iterators over a subset
    of their childre (see Interface for example).
    """

    def __init__(self, parent=None):
        PolicyBase.__init__(self, parent)
        self.children = []

    def __iter__(self):
        return iter(self.children)

    # Not all of the iterators will return something on all Nodes, but
    # they won't explode either. Putting them here is just easier.

    # Top level nodes

    def nodes(self):
        return filter(lambda x: isinstance(x, Node), walktree(self))

    def modules(self):
        return filter(lambda x: isinstance(x, Module), walktree(self))

    def interfaces(self):
        return filter(lambda x: isinstance(x, Interface), walktree(self))

    def templates(self):
        return filter(lambda x: isinstance(x, Template), walktree(self))

    def support_macros(self):
        return filter(lambda x: isinstance(x, SupportMacros), walktree(self))

    # Common policy statements

    def module_declarations(self):
        return filter(lambda x: isinstance(x, ModuleDeclaration), walktree(self))

    def interface_calls(self):
        return filter(lambda x: isinstance(x, InterfaceCall), walktree(self))

    def avrules(self):
        return filter(lambda x: isinstance(x, AVRule), walktree(self))

    def avextrules(self):
        return filter(lambda x: isinstance(x, AVExtRule), walktree(self))

    def typerules(self):
        return filter(lambda x: isinstance(x, TypeRule), walktree(self))

    def typebounds(self):
        return filter(lambda x: isinstance(x, TypeBound), walktree(self))

    def typeattributes(self):
        """Iterate over all of the TypeAttribute children of this Interface."""
        return filter(lambda x: isinstance(x, TypeAttribute), walktree(self))

    def roleattributes(self):
        """Iterate over all of the RoleAttribute children of this Interface."""
        return filter(lambda x: isinstance(x, RoleAttribute), walktree(self))

    def requires(self):
        return filter(lambda x: isinstance(x, Require), walktree(self))

    def roles(self):
        return filter(lambda x: isinstance(x, Role), walktree(self))

    def role_allows(self):
        return filter(lambda x: isinstance(x, RoleAllow), walktree(self))

    def role_types(self):
        return filter(lambda x: isinstance(x, RoleType), walktree(self))

    def __str__(self):
        if self.comment:
            return str(self.comment) + "\n" + self.to_string()
        else:
            return self.to_string()

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__, self.to_string())

    def to_string(self):
        return ""


class Leaf(PolicyBase):
    def __init__(self, parent=None):
        PolicyBase.__init__(self, parent)

    def __str__(self):
        if self.comment:
            return str(self.comment) + "\n" + self.to_string()
        else:
            return self.to_string()

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__, self.to_string())

    def to_string(self):
        return ""



# Utility functions

def walktree(node, depthfirst=True, showdepth=False, type=None):
    """Iterate over a Node and its Children.

    The walktree function iterates over a tree containing Nodes and
    leaf objects. The iteration can perform a depth first or a breadth
    first traversal of the tree (controlled by the depthfirst
    paramater. The passed in node will be returned.

    This function will only work correctly for trees - arbitrary graphs
    will likely cause infinite looping.
    """
    # We control depth first / versus breadth first by
    # how we pop items off of the node stack.
    if depthfirst:
        index = -1
    else:
        index = 0

    stack = [(node, 0)]
    while len(stack) > 0:
        cur, depth = stack.pop(index)
        if showdepth:
            yield cur, depth
        else:
            yield cur

        # If the node is not a Node instance it must
        # be a leaf - so no need to add it to the stack
        if isinstance(cur, Node):
            items = []
            i = len(cur.children) - 1
            while i >= 0:
                if type is None or isinstance(cur.children[i], type):
                    items.append((cur.children[i], depth + 1))
                i -= 1

            stack.extend(items)

def walknode(node, type=None):
    """Iterate over the direct children of a Node.

    The walktree function iterates over the children of a Node.
    Unlike walktree it does note return the passed in node or
    the children of any Node objects (that is, it does not go
    beyond the current level in the tree).
    """
    for x in node:
        if type is None or isinstance(x, type):
            yield x


def list_to_space_str(s, cont=('{', '}')):
    """Convert a set (or any sequence type) into a string representation
    formatted to match SELinux space separated list conventions.

    For example the list ['read', 'write'] would be converted into:
    '{ read write }'
    """
    l = len(s)
    str = ""
    if l < 1:
        raise ValueError("cannot convert 0 len set to string")
    str = " ".join(s)
    if l == 1:
        return str
    else:
        return cont[0] + " " + str + " " + cont[1]

def list_to_comma_str(s):
    l = len(s)
    if l < 1:
        raise ValueError("cannot conver 0 len set to comma string")

    return ", ".join(s)

# Basic SELinux types

class IdSet(set):
    def __init__(self, list=None):
        if list:
            set.__init__(self, list)
        else:
            set.__init__(self)
        self.compliment = False

    def to_space_str(self):
        return list_to_space_str(sorted(self))

    def to_comma_str(self):
        return list_to_comma_str(sorted(self))

class SecurityContext(Leaf):
    """An SELinux security context with optional MCS / MLS fields."""
    def __init__(self, context=None, parent=None):
        """Create a SecurityContext object, optionally from a string.

        Parameters:
           [context] - string representing a security context. Same format
              as a string passed to the from_string method.
        """
        Leaf.__init__(self, parent)
        self.user = ""
        self.role = ""
        self.type = ""
        self.level = None
        if context is not None:
            self.from_string(context)

    def from_string(self, context):
        """Parse a string representing a context into a SecurityContext.

        The string should be in the standard format - e.g.,
        'user:role:type:level'.

        Raises ValueError if the string is not parsable as a security context.
        """
        # try to translate the context string to raw form
        raw = selinux.selinux_trans_to_raw_context(context)
        if raw[0] == 0:
            context = raw[1]

        fields = context.split(":")
        if len(fields) < 3:
            raise ValueError("context string [%s] not in a valid format" % context)

        self.user = fields[0]
        self.role = fields[1]
        self.type = fields[2]
        if len(fields) > 3:
            # FUTURE - normalize level fields to allow more comparisons to succeed.
            self.level = ':'.join(fields[3:])
        else:
            self.level = None

    def __eq__(self, other):
        """Compare two SecurityContext objects - all fields must be exactly the
        the same for the comparison to work. It is possible for the level fields
        to be semantically the same yet syntactically different - in this case
        this function will return false.
        """
        return self.user == other.user and \
               self.role == other.role and \
               self.type == other.type and \
               self.level == other.level

    def to_string(self, default_level=None):
        """Return a string representing this security context.

        By default, the string will contiain a MCS / MLS level
        potentially from the default which is passed in if none was
        set.

        Arguments:
           default_level - the default level to use if self.level is an
             empty string.

        Returns:
           A string represening the security context in the form
              'user:role:type:level'.
        """
        fields = [self.user, self.role, self.type]
        if self.level is None:
            if default_level is None:
                if selinux.is_selinux_mls_enabled() == 1:
                    fields.append("s0")
            else:
                fields.append(default_level)
        else:
            fields.append(self.level)
        return ":".join(fields)

class ObjectClass(Leaf):
    """SELinux object class and permissions.

    This class is a basic representation of an SELinux object
    class - it does not represent separate common permissions -
    just the union of the common and class specific permissions.
    It is meant to be convenient for policy generation.
    """
    def __init__(self, name="", parent=None):
        Leaf.__init__(self, parent)
        self.name = name
        self.perms = IdSet()

class XpermSet():
    """Extended permission set.

    This class represents one or more extended permissions
    represented by numeric values or ranges of values. The
    .complement attribute is used to specify all permission
    except those specified.

    Two xperm set can be merged using the .extend() method.
    """
    def __init__(self, complement=False):
        self.complement = complement
        self.ranges = []

    def __normalize_ranges(self):
        """Ensure that ranges are not overlapping.
        """
        self.ranges.sort()

        i = 0
        while i < len(self.ranges):
            while i + 1 < len(self.ranges):
                if self.ranges[i + 1][0] <= self.ranges[i][1] + 1:
                    self.ranges[i] = (self.ranges[i][0], max(self.ranges[i][1],
                                                             self.ranges[i + 1][1]))
                    del self.ranges[i + 1]
                else:
                    break
            i += 1

    def extend(self, s):
        """Add ranges from an xperm set
        """
        self.ranges.extend(s.ranges)
        self.__normalize_ranges()

    def add(self, minimum, maximum=None):
        """Add value of range of values to the xperm set.
        """
        if maximum is None:
            maximum = minimum
        self.ranges.append((minimum, maximum))
        self.__normalize_ranges()

    def to_string(self):
        if not self.ranges:
            return ""

        compl = "~ " if self.complement else ""

        # print single value without braces
        if len(self.ranges) == 1 and self.ranges[0][0] == self.ranges[0][1]:
            return compl + str(self.ranges[0][0])

        vals = map(lambda x: str(x[0]) if x[0] == x[1] else "%s-%s" % x,
                   self.ranges)

        return "%s{ %s }" % (compl, " ".join(vals))

# Basic statements

class TypeAttribute(Leaf):
    """SElinux typeattribute statement.

    This class represents a typeattribute statement.
    """
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.type = ""
        self.attributes = IdSet()

    def to_string(self):
        return "typeattribute %s %s;" % (self.type, self.attributes.to_comma_str())

class RoleAttribute(Leaf):
    """SElinux roleattribute statement.

    This class represents a roleattribute statement.
    """
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.role = ""
        self.roleattributes = IdSet()

    def to_string(self):
        return "roleattribute %s %s;" % (self.role, self.roleattributes.to_comma_str())


class Role(Leaf):
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.role = ""
        self.types = IdSet()

    def to_string(self):
        s = ""
        for t in self.types:
            s += "role %s types %s;\n" % (self.role, t)
        return s

class Type(Leaf):
    def __init__(self, name="", parent=None):
        Leaf.__init__(self, parent)
        self.name = name
        self.attributes = IdSet()
        self.aliases = IdSet()

    def to_string(self):
        s = "type %s" % self.name
        if len(self.aliases) > 0:
            s = s + "alias %s" % self.aliases.to_space_str()
        if len(self.attributes) > 0:
            s = s + ", %s" % self.attributes.to_comma_str()
        return s + ";"

class TypeAlias(Leaf):
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.type = ""
        self.aliases = IdSet()

    def to_string(self):
        return "typealias %s alias %s;" % (self.type, self.aliases.to_space_str())

class Attribute(Leaf):
    def __init__(self, name="", parent=None):
        Leaf.__init__(self, parent)
        self.name = name

    def to_string(self):
        return "attribute %s;" % self.name

class Attribute_Role(Leaf):
    def __init__(self, name="", parent=None):
        Leaf.__init__(self, parent)
        self.name = name

    def to_string(self):
        return "attribute_role %s;" % self.name


# Classes representing rules

class AVRule(Leaf):
    """SELinux access vector (AV) rule.

    The AVRule class represents all varieties of AV rules including
    allow, dontaudit, and auditallow (indicated by the flags self.ALLOW,
    self.DONTAUDIT, and self.AUDITALLOW respectively).

    The source and target types, object classes, and perms are all represented
    by sets containing strings. Sets are used to make it simple to add
    strings repeatedly while avoiding duplicates.

    No checking is done to make certain that the symbols are valid or
    consistent (e.g., perms that don't match the object classes). It is
    even possible to put inva

Batosay - 2023
IDNSEO Team