-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
8 changed files
with
285 additions
and
298 deletions.
There are no files selected for viewing
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,104 @@ | ||
""" | ||
This file contains the Ast class, which represents an abstract syntax tree which can be evaluated. | ||
""" | ||
import copy | ||
from typing import Dict, Union | ||
|
||
from common import RuleMatch, remove, left_assoc, Token, Value, value_map | ||
from rules import calc_map | ||
|
||
|
||
class Ast:
    """An abstract syntax tree built from a raw ``RuleMatch`` parse tree.

    The constructor normalizes the parse tree (flattening trivial rules,
    enforcing left-associativity, expanding implicit multiplication,
    flattening matrix rows/bodies) so that evaluation is straightforward.
    """

    def __init__(self, ast: RuleMatch):
        self.ast = self._fixed(ast)

    def _fixed(self, ast):
        """Recursively normalize a parse (sub)tree and return the result.

        Tokens are returned unchanged; RuleMatch nodes are rewritten in
        place and/or replaced.
        """
        # Tokens (leaves) need no normalization.
        if not isinstance(ast, RuleMatch):
            return ast

        # This flattens rules with a single matched element.
        # FIX: the original used `len(...) is 1` — identity comparison with
        # an int literal is implementation-dependent and a SyntaxWarning on
        # modern CPython; `== 1` is the correct comparison.
        if len(ast.matched) == 1 and ast.name not in ('num', 'mbd'):
            return self._fixed(ast.matched[0])

        # This removes extraneous symbols (e.g. parentheses/separators
        # listed in `remove`) from the tree.  Iterate backwards so deletion
        # doesn't shift upcoming indices.
        for i in range(len(ast.matched) - 1, -1, -1):
            if ast.matched[i].name in remove:
                del ast.matched[i]

        # This makes left-associative operations left-associative: rotate
        # `a op (b op c)` into `(a op b) op c`, then re-normalize.
        for token_name, rule in left_assoc.items():
            if len(ast.matched) == 3 and ast.matched[1].name == token_name \
                    and isinstance(ast.matched[2], RuleMatch) \
                    and len(ast.matched[2].matched) == 3 \
                    and ast.matched[2].matched[1].name == token_name:
                ast.matched[0] = RuleMatch(rule, [ast.matched[0], ast.matched[1], ast.matched[2].matched[0]])
                ast.matched[1] = ast.matched[2].matched[1]
                ast.matched[2] = ast.matched[2].matched[2]
                return self._fixed(ast)

        # This converts implicit multiplication to regular multiplication.
        if ast.name == 'mui':
            return self._fixed(RuleMatch('mul', [ast.matched[0], Token('MUL', '*'), ast.matched[1]]))

        # This flattens matrix rows into parent matrix rows.
        if ast.name == 'mrw' and ast.matched[1].name == 'mrw':
            ast.matched[1:] = ast.matched[1].matched
            return self._fixed(ast)

        # This flattens matrix bodies into parent matrix bodies.
        if ast.name == 'mbd' and len(ast.matched) > 1 and ast.matched[1].name == 'mbd':
            ast.matched[1:] = ast.matched[1].matched
            return self._fixed(ast)

        # Normalize all children.  (The guard at the top of this method
        # already guarantees `ast` is a RuleMatch here, so the original's
        # redundant isinstance re-check was dropped.)
        for i in range(len(ast.matched)):
            ast.matched[i] = self._fixed(ast.matched[i])

        return ast

    def evaluate(self, vrs: Dict[str, RuleMatch]):
        """Evaluate the tree against the variable table `vrs`.

        Returns a ``Value`` for expressions, or a dict of new variable
        bindings for assignment statements.
        """
        res = self._evaluate(self.ast, vrs)

        if isinstance(res, Token):
            return Value(value_map[res.name], res.value)

        return res

    def _evaluate(self, ast, vrs: Dict[str, RuleMatch]) -> Union[Dict[str, RuleMatch], Token]:
        """Evaluate one node: reduce sub-rules bottom-up, then apply the
        node's calculation from `calc_map`."""
        # Assignment nodes yield a {name: parse-tree} binding, not a value.
        if ast.name == 'idt':
            return {ast.matched[0].value: ast.matched[1]}

        # Evaluate the first unevaluated sub-rule, then restart on the
        # partially-reduced node.
        for i in range(len(ast.matched)):
            token = ast.matched[i]

            if isinstance(token, RuleMatch):
                ast.matched[i] = self._evaluate(token, vrs)
                return self._evaluate(ast, vrs)

        # All children are Tokens now.  (The original used a `for/else`
        # with no `break`, which is equivalent to falling through.)
        if ast.matched[0].name == 'IDT':
            # Variable reference: substitute its stored parse tree.  Deep
            # copy so evaluation can't mutate the stored definition.
            return self._evaluate(copy.deepcopy(vrs[ast.matched[0].value]), vrs)

        return calc_map[ast.name](ast.matched)

    def infix(self) -> str:
        """Render the tree as an infix-notation string."""
        # TODO: Add parentheses where needed.
        return self._infix(self.ast)

    def _infix(self, ast: RuleMatch) -> str:
        return ' '.join(map(lambda t: t.value if isinstance(t, Token) else self._infix(t), ast.matched))

    def __str__(self):
        return self._str(self.ast)  # + '\n>> ' + self.infix()

    def _str(self, ast, depth=0) -> str:
        """Pretty-print the tree, one node per line, tab-indented by depth."""
        output = (('\t' * depth) + ast.name) + '\n'

        for matched in ast.matched:
            if isinstance(matched, RuleMatch) and matched.matched:
                output += self._str(matched, depth + 1)

            else:
                output += (('\t' * (depth + 1)) + matched.name + ': ' + matched.value) + '\n'

        return output
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,75 +1,69 @@ | ||
""" | ||
A calculator implemented with the Shunting Yard Algorithm. The AST version is far superior and this version should be disregarded. | ||
This file contains the Calculator class, which accept an equation and generates an AST, and also keeps track of variables. | ||
""" | ||
from typing import List | ||
|
||
import re | ||
from collections import namedtuple | ||
from operator import add, sub, mul, truediv, pow, mod | ||
|
||
Operator = namedtuple('Operator', ('symbol', 'function', 'precedence', 'associativity')) | ||
from ast import Ast | ||
from common import Value, Token, token_map, rules_map, RuleMatch | ||
|
||
class Calculator:
    """Accepts equations, parses them into ASTs, evaluates them, and keeps
    track of variable assignments across calls."""

    def __init__(self):
        # Maps variable names to their (unevaluated) parse trees.
        self.vrs = {}

    def evaluate(self, eqtn: str) -> Value:
        """Tokenize, parse, and evaluate `eqtn`.

        Statements may be separated by ';'.  Assignments update
        ``self.vrs``; the first statement producing a ``Value`` is
        returned.  Raises ``Exception`` on malformed input.
        """
        for e in eqtn.split(';'):
            root, remaining_tokens = self._match(self._tokenize(e), 'idt')

            if remaining_tokens:
                raise Exception('Invalid equation (bad format)')

            ast = Ast(root)
            print(ast)
            res = ast.evaluate(self.vrs)

            if isinstance(res, Value):
                return res

            elif isinstance(res, dict):
                self.vrs.update(res)

    def _tokenize(self, eqtn: str) -> List[Token]:
        """Convert `eqtn` into a list of Tokens, rejecting illegal input."""
        # Build the alternation of all token patterns once.
        pattern = '(' + ')|('.join(token_map.keys()) + ')'

        # BUG FIX: the original called re.sub(..., '', 'eqtn') — validating
        # the literal string 'eqtn' instead of the input — so illegal
        # tokens were never detected.  Strip out everything matchable; any
        # non-whitespace residue means the input contains illegal tokens.
        if re.sub(pattern, '', eqtn).strip():
            raise Exception('Invalid equation (illegal tokens)')

        tokens = []

        for match in re.findall(pattern, eqtn):
            # `match` is a tuple of groups; exactly one is non-empty, and
            # its index identifies which token pattern matched.
            entry = next(filter(lambda entry: entry[1] != '', enumerate(match)), None)
            tokens.append(Token(list(token_map.values())[entry[0]], entry[1]))

        return tokens

    def _match(self, tokens: List[Token], target_rule: str):
        """Recursive-descent match of `tokens` against `target_rule`.

        Returns ``(match, remaining_tokens)`` on success, or
        ``(None, None)`` if no pattern for the rule matches.
        """
        if tokens and tokens[0].name == target_rule:  # This is a token, not a rule.
            return tokens[0], tokens[1:]

        for pattern in rules_map.get(target_rule, ()):
            remaining_tokens = tokens
            matched = []

            for pattern_token in pattern.split():
                m, remaining_tokens = self._match(remaining_tokens, pattern_token)

                if not m:
                    break

                matched.append(m)
            else:
                # Every pattern token matched: success.
                return RuleMatch(target_rule, matched), remaining_tokens

        return None, None
Oops, something went wrong.