Skip to content

Commit

Permalink
Identifiers are currently broken!
Browse files Browse the repository at this point in the history
Changed evaluation to be more object-oriented.
Changed matching to be less recursive and therefore cleaner (fewer one-element matchings).
Began work on trnsform (rref transformation matrix finding function).
  • Loading branch information
nrubin29 committed Oct 8, 2017
1 parent 1cd9c6d commit d358e29
Show file tree
Hide file tree
Showing 8 changed files with 478 additions and 233 deletions.
8 changes: 8 additions & 0 deletions EXPLANATION.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
1. The input is processed into a series of tokens using regular expressions.
* Calculator#_tokenize
2. The tokens are turned into a tree of RuleMatches using a right-recursive pattern matching algorithm.
* Calculator#_match
3. The tree is fixed. Unnecessary tokens are removed, precedence issues are fixed, etc.
* Ast#_fixed
4. The tree is evaluated in a recursive fashion.
* Ast#evaluate
40 changes: 22 additions & 18 deletions ast.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
"""
This file contains the Ast class, which represents an abstract syntax tree which can be evaluated.
"""
import copy
from typing import Dict, Union
from typing import Dict

from common import RuleMatch, remove, left_assoc, Token, Value, value_map
from rules import calc_map
from common import RuleMatch, remove, left_assoc, Token
from rules import rule_process_map, rule_process_value_map


class Ast:
def __init__(self, ast: RuleMatch):
print(self._str(ast))
self.ast = self._fixed(ast)

def _fixed(self, ast):
Expand All @@ -18,15 +18,16 @@ def _fixed(self, ast):
if not isinstance(ast, RuleMatch):
return ast

# This flattens rules with a single matched element.
if len(ast.matched) is 1 and ast.name != 'num' and ast.name != 'mbd':
return self._fixed(ast.matched[0])

# This removes extraneous symbols from the tree.
for i in range(len(ast.matched) - 1, -1, -1):
if ast.matched[i].name in remove:
del ast.matched[i]

# This flattens rules with a single matched element.
if len(ast.matched) is 1 and ast.name != 'mbd':
if ast.name != 'num' or isinstance(ast.matched[0], RuleMatch):
return self._fixed(ast.matched[0])

# This makes left-associative operations left-associative.
for token_name, rule in left_assoc.items():
if len(ast.matched) == 3 and ast.matched[1].name == token_name and isinstance(ast.matched[2], RuleMatch) and len(ast.matched[2].matched) == 3 and ast.matched[2].matched[1].name == token_name:
Expand Down Expand Up @@ -67,19 +68,19 @@ def _evaluate(self, ast, vrs: Dict[str, RuleMatch]): # -> Union[Dict[str, RuleM
return {ast.matched[0].value: ast.matched[1]}

for token in ast.matched:
if isinstance(token, RuleMatch) and not token.value:
token.value = self._evaluate(token, vrs)
if isinstance(token, RuleMatch) and not token.processed:
token.processed = self._evaluate(token, vrs)

if any(map(lambda t: isinstance(t, RuleMatch), ast.matched)):
return calc_map[ast.name](ast.matched)
prms = [token.processed.value for token in ast.matched if isinstance(token, RuleMatch) and token.processed]
tokens = [token for token in ast.matched if not isinstance(token, RuleMatch)]

if ast.name in rule_process_value_map:
processed = rule_process_value_map[ast.name](prms, tokens)

else:
if ast.matched[0].name == 'IDT':
return self._evaluate(copy.deepcopy(vrs[ast.matched[0].value]), vrs)
processed = rule_process_map[ast.name](prms, tokens[0] if len(tokens) > 0 else None) # This extra rule is part of the num hotfix.

else:
# At this point, ast.name will _always_ be `num`.
return calc_map[ast.name](ast.matched)
return processed

def infix(self) -> str:
# TODO: Add parentheses where needed.
Expand All @@ -91,8 +92,11 @@ def _infix(self, ast: RuleMatch) -> str:
def __str__(self):
return self._str(self.ast) # + '\n>> ' + self.infix()

def __repr__(self):
return str(self)

def _str(self, ast, depth=0) -> str:
output = (('\t' * depth) + ast.name + ' = ' + str(ast.value.value)) + '\n'
output = (('\t' * depth) + ast.name + ' = ' + str(ast.processed.value if ast.processed else None)) + '\n'

for matched in ast.matched:
if isinstance(matched, RuleMatch) and matched.matched:
Expand Down
24 changes: 16 additions & 8 deletions calculator.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import re

from ast import Ast
from common import Value, Token, token_map, rules_map, RuleMatch
from common import Value, Token, token_map, rules_map, RuleMatch, ProcessedRuleMatch, rm_index, rm_key_at


class Calculator:
Expand All @@ -23,13 +23,13 @@ def evaluate(self, eqtn: str, verbose=True) -> Value:
ast = Ast(root)
res = ast.evaluate(self.vrs)

if isinstance(res, Value):
ast.ast.value = res
if isinstance(res, ProcessedRuleMatch):
# ast.ast.value = res

if verbose:
print(ast)

return res
return res.value

elif isinstance(res, dict):
self.vrs.update(res)
Expand All @@ -49,10 +49,14 @@ def _tokenize(self, eqtn: str) -> List[Token]:
def _match(self, tokens: List[Token], target_rule: str):
# print('match', tokens, target_rule)

if tokens and tokens[0].name == target_rule: # This is a token, not a rule.
return tokens[0], tokens[1:]
if target_rule.isupper():
# This is a token, not a rule.
if tokens and tokens[0].name == target_rule: # This is a token, not a rule.
return tokens[0], tokens[1:]

for pattern in rules_map.get(target_rule, ()):
return None, None

for pattern in rules_map[target_rule]:
# print('trying pattern', pattern)

remaining_tokens = tokens
Expand All @@ -69,5 +73,9 @@ def _match(self, tokens: List[Token], target_rule: str):
matched.append(m)
else:
# Success!
return RuleMatch(target_rule, matched, None), remaining_tokens
return RuleMatch(target_rule, matched), remaining_tokens

if rm_index(target_rule) + 1 < len(rules_map):
return self._match(tokens, rm_key_at(rm_index(target_rule) + 1))

return None, None
84 changes: 58 additions & 26 deletions common.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,26 +2,54 @@
This file contains important information for the calculator.
"""

from collections import namedtuple, OrderedDict
from enum import Enum
from collections import OrderedDict
from typing import List

Token = namedtuple('Token', ('name', 'value'))
Value = namedtuple('Value', ('type', 'value'))
from token_value import Token
from vartypes import Number, Matrix, MatrixRow


class RuleMatch:
    """A node in the parse tree: the name of the grammar rule that matched,
    plus the tokens and/or nested RuleMatches it consumed.

    NOTE(review): the scraped diff interleaved the pre- and post-commit
    versions of this class (duplicate __init__/__str__ bodies); this is the
    reconstructed post-commit version, where the evaluated result lives in
    `processed` instead of the old `value` attribute.
    """

    def __init__(self, name: str, matched: List['Token']):
        self.name = name          # grammar rule name (e.g. 'add', 'num')
        self.matched = matched    # mix of Tokens and nested RuleMatches
        self.processed = None     # set during evaluation (a ProcessedRuleMatch)

    def __str__(self):
        return self._str(self)  # + '\n>> ' + self.infix()

    def __repr__(self):
        return str(self)

    def _str(self, ast, depth=0) -> str:
        # Render the subtree one node per line, indented one tab per level.
        # Shows the evaluated value when the node has been processed.
        output = (('\t' * depth) + ast.name + ' = ' + str(ast.processed.value if ast.processed else None)) + '\n'

        for matched in ast.matched:
            if isinstance(matched, RuleMatch) and matched.matched:
                output += self._str(matched, depth + 1)

            else:
                # Leaf: a plain Token — print its name and raw text.
                output += (('\t' * (depth + 1)) + matched.name + ': ' + matched.value) + '\n'

        return output


class ProcessedRuleMatch:
    """The result of evaluating a RuleMatch: the operation applied, its
    operands, and the computed value."""

    def __init__(self, operation, operands: List, raw_args=False):
        self.operation = operation
        self.operands = operands
        # raw_args=True hands the operand list to the operation as a single
        # argument; otherwise the list is unpacked into positional arguments.
        self.value = operation(operands) if raw_args else operation(*operands)


token_map = OrderedDict((
(r'\d+(?:\.\d+)?', 'NUM'),
Expand All @@ -32,6 +60,8 @@ def __repr__(self):
(r'trans', 'OPR'),
(r'cof', 'OPR'),
(r'inv', 'OPR'),
(r'identity', 'OPR'),
(r'trnsform', 'OPR'),
(r'rref', 'OPR'),
(r'[a-zA-Z_]+', 'IDT'),
(r'=', 'EQL'),
Expand All @@ -53,33 +83,35 @@ def __repr__(self):
remove = ('EQL', 'LPA', 'RPA', 'LBR', 'RBR', 'CMA', 'PPE')

rules_map = OrderedDict((
('idt', ('IDT EQL add', 'mat')),
('mat', ('LBR mbd RBR', 'add')),
('mbd', ('mrw PPE mbd', 'mrw', 'add')),
('mrw', ('add CMA mrw', 'add')),
('add', ('mul ADD add', 'mui', 'mul')),
('idt', ('IDT EQL add',)),
('mat', ('LBR mbd RBR',)),
('mbd', ('mrw PPE mbd',)),
('mrw', ('add CMA mrw',)),
('add', ('mul ADD add',)),
('mui', ('pow mul',)),
('mul', ('pow MUL mul', 'pow')),
('pow', ('opr POW pow', 'opr')),
('opr', ('OPR LPA mat RPA', 'neg')),
('neg', ('ADD num', 'ADD opr', 'num')),
('mul', ('pow MUL mul',)),
('pow', ('opr POW pow',)),
('opr', ('OPR LPA mat RPA',)),
('neg', ('ADD num', 'ADD opr')),
('num', ('NUM', 'IDT', 'LPA add RPA')),
))


def rm_index(key):
    """Return the position of *key* within rules_map's ordered keys."""
    ordered_keys = list(rules_map)
    return ordered_keys.index(key)


def rm_key_at(i):
    """Return the rules_map key at ordered position *i*."""
    ordered_keys = list(rules_map)
    return ordered_keys[i]


# Operator token name -> rule name for operations that must be re-associated
# to the left after the right-recursive parse (see Ast._fixed).
left_assoc = {
    'ADD': 'add',
    'MUL': 'mul',
}


class Type(Enum):
Number = 0
Matrix = 1
MatrixRow = 2


# Maps token names to the vartypes class that wraps their values.
# NOTE(review): the scraped diff interleaved the removed Type.* entries with
# the added vartypes entries (and dropped a comma), which is a syntax error;
# this is the reconstructed post-commit mapping.
value_map = {
    'NUM': Number,
    'MAT': Matrix,
    'MRW': MatrixRow,
}
Loading

0 comments on commit d358e29

Please sign in to comment.