Improved Caching
masus04 committed Aug 10, 2018
1 parent 6889cc1 commit 6126e5f
Showing 3 changed files with 30 additions and 32 deletions.
8 changes: 6 additions & 2 deletions Othello/environment/board.py
@@ -89,6 +89,12 @@ def get_empty_spaces(self):
stone_count = self.count_stones()
return self.board_size**2 - stone_count[0] - stone_count[1]

def __eq__(self, other):
return (self.board == other.board).all()

def __hash__(self):
return hash(str(self.board))


""" --- Numba implementations --- '''
Numba 0.36 does not yet fully support custom types.
@@ -205,5 +211,3 @@ def __get_representation_njit__(board, board_size, color, other_color):
@njit
def in_bounds(board_size, position):
return position[0] >= 0 and position[1] >= 0 and position[0] < board_size and position[1] < board_size


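The __eq__/__hash__ pair added above is what lets a board position act as a key in hash-based containers, which the lru_cache change in search_based_ai.py further down relies on. A minimal sketch of the idea, assuming a numpy-array board attribute as in the diff; the Board class here is only an illustrative stand-in, not the repository's class:

import numpy as np

class Board:
    """Illustrative stand-in for the patched board class."""
    def __init__(self, board_size=8):
        self.board_size = board_size
        self.board = np.zeros((board_size, board_size), dtype=np.int8)

    def __eq__(self, other):
        # Element-wise comparison gives a boolean array; .all() collapses it.
        return (self.board == other.board).all()

    def __hash__(self):
        # Equal positions print to the same string, hence hash equally.
        return hash(str(self.board))

# Two boards with identical stones now compare and hash equally,
# so a position can be looked up in a dict or an lru_cache:
cache = {Board(): 0.0}
assert Board() in cache

Note that defining __eq__ without also defining __hash__ would have made the class unhashable, so both methods are needed for the caching below.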
6 changes: 3 additions & 3 deletions Othello/experiments/evaluatePlayer.py
@@ -25,9 +25,9 @@ def run(self, player1, player2, games):
START_TIME = datetime.now()

LR = 1e-5 + random() * 1e-9
PLAYER1 = RandomPlayer()
PLAYER2 = SearchPlayer(search_depth=5, strategy=0)
GAMES = 10
PLAYER1 = SearchPlayer(search_depth=3, strategy=0)
PLAYER2 = ExperiencedPlayer() # RandomPlayer()
GAMES = 50

experiment = EvaluatePlayer()
experiment.run(player1=PLAYER1, player2=PLAYER2, games=GAMES)
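The experiment configuration above now pits a depth-3 SearchPlayer against an ExperiencedPlayer over 50 games, replacing the earlier RandomPlayer versus depth-5 SearchPlayer setup over 10 games. For orientation only, a hedged sketch of what such a head-to-head evaluation loop typically looks like; play_game and the win-rate bookkeeping are assumptions here, not the repository's EvaluatePlayer API:

def evaluate(player1, player2, games, play_game):
    """Hypothetical head-to-head loop in the spirit of EvaluatePlayer.run:
    play `games` games with alternating colors and report player1's win rate.
    `play_game(black, white)` is an assumed callable returning the winner."""
    wins = 0
    for i in range(games):
        black, white = (player1, player2) if i % 2 == 0 else (player2, player1)
        if play_game(black, white) is player1:
            wins += 1
    return wins / games

# e.g. with a dummy game that always awards the win to black:
print(evaluate("A", "B", 50, lambda black, white: black))  # 0.5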
48 changes: 21 additions & 27 deletions Othello/players/search_based_ai.py
@@ -1,43 +1,41 @@
import datetime
import sys
import random
from queue import PriorityQueue
from functools import lru_cache


class GameArtificialIntelligence(object):

def __init__(self, heuristic_fn):
self.heuristic = heuristic_fn
self.trans_table = dict()

def move_search(self, starting_node, search_depth, current_player, other_player):
@lru_cache(maxsize=2**10)
def move_search(self, starting_node, depth, current_player, other_player):
self.player = current_player
self.other_player = other_player
possible_moves = starting_node.get_valid_moves(current_player)
if len(possible_moves) == 1:
return list(possible_moves)[0]
depth = 0

score = -sys.maxsize
move = None
self.queue = PriorityQueue(len(possible_moves))
self.first = True
while depth <= search_depth and starting_node.get_empty_spaces() >= depth:
depth += 1
(new_move, new_score) = self.alpha_beta_wrapper(starting_node, depth, current_player, other_player)
if new_move is not None:
move = new_move
score = new_score
# print "Got to Depth:", depth

(new_move, new_score) = self.alpha_beta_wrapper(starting_node, depth, current_player, other_player)
if new_move is not None:
move = new_move
score = new_score
# print "Got to Depth:", depth
return move


def alpha_beta_wrapper(self, node, depth, current_player, other_player):
alpha = -sys.maxsize-1
beta = sys.maxsize
if self.first:
children = node.child_nodes(current_player)
# Shuffle order of moves evaluated to prevent playing the same game every time
random.shuffle(children)
for (child, move) in children:
if self.queue.queue:
children = self.queue.queue
self.queue = PriorityQueue(self.queue.maxsize)
for (x, child, move) in children:
new_alpha = self.alpha_beta_search(child, depth-1, other_player, current_player, alpha, beta, False)
if new_alpha is None:
return (None, None)
@@ -47,11 +45,11 @@ def alpha_beta_wrapper(self, node, depth, current_player, other_player):
alpha = new_alpha
best_move = move
#print "Possible move:", move, "Score:", new_alpha
self.first = False
else:
children = self.queue.queue
self.queue = PriorityQueue(self.queue.maxsize)
for (x, child, move) in children:
children = node.child_nodes(current_player)
# Shuffle order of moves evaluated to prevent playing the same game every time
random.shuffle(children)
for (child, move) in children:
new_alpha = self.alpha_beta_search(child, depth-1, other_player, current_player, alpha, beta, False)
if new_alpha is None:
return (None, None)
@@ -69,12 +67,8 @@ def keyify(self, node, player):

def alpha_beta_search(self, node, depth, current_player, other_player, alpha=-sys.maxsize-1, beta=sys.maxsize, maximizing=True):
if depth == 0 or node.game_won() is not None:
key = self.keyify(node, self.player)
if key in self.trans_table:
return self.trans_table[key]
val = self.heuristic(node, self.player, self.other_player)
self.trans_table[key] = val
return val
return self.heuristic(node, self.player, self.other_player)

children = node.child_nodes(current_player)
if maximizing:
if len(children) == 0:
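This file carries the core of the commit: the hand-rolled transposition table (self.trans_table plus keyify) is dropped, and move_search is instead memoized with functools.lru_cache, which only works because board nodes are now hashable via the __eq__/__hash__ added in board.py. The iterative-deepening loop is removed as well, so the search depth is passed in directly. One general caveat with this pattern: lru_cache on an instance method builds its key from all arguments, including self, and keeps references to those arguments alive until their entries are evicted. A minimal standalone sketch of the memoization idea, with illustrative names rather than the repository's API:

from functools import lru_cache

class Position:
    """Toy hashable game state standing in for the hashable board above."""
    def __init__(self, stones=()):
        self.stones = tuple(stones)

    def __eq__(self, other):
        return self.stones == other.stones

    def __hash__(self):
        return hash(self.stones)

    def children(self):
        # Toy move generator: extend the position with a 0 or a 1.
        return [Position(self.stones + (s,)) for s in (0, 1)]

@lru_cache(maxsize=2**10)
def search(position, depth):
    """Memoized stand-in for the search entry point: repeated
    (position, depth) pairs are answered from the cache."""
    if depth == 0:
        return 1
    return sum(search(child, depth - 1) for child in position.children())

print(search(Position(), 10))   # computed on the first call
print(search.cache_info())      # a miss is recorded for every new (position, depth)
print(search(Position(), 10))   # served from the cache on the second call

A maxsize of 2**10, matching the decorator in the diff, caps the cache at the 1024 most recently used entries.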
