Fix bugs in the latest version of NumPy #7

Merged
libmultilabel/linear/tree.py (20 changes: 10 additions & 10 deletions)
@@ -95,18 +95,18 @@ def _beam_search(self, instance_preds: np.ndarray, beam_width: int) -> np.ndarra
                     continue
                 slice = np.s_[self.weight_map[node.index] : self.weight_map[node.index + 1]]
                 pred = instance_preds[slice]
-                children_score = score - np.maximum(0, 1 - pred) ** 2
+                children_score = score - np.square(np.maximum(0, 1 - pred))
                 next_level.extend(zip(node.children, children_score.tolist()))

             cur_level = sorted(next_level, key=lambda pair: -pair[1])[:beam_width]
             next_level = []

         num_labels = len(self.root.label_map)
-        scores = np.full(num_labels, 0)
+        scores = np.full(num_labels, 0.0)
         for node, score in cur_level:
             slice = np.s_[self.weight_map[node.index] : self.weight_map[node.index + 1]]
             pred = instance_preds[slice]
-            scores[node.label_map] = np.exp(score - np.maximum(0, 1 - pred) ** 2)
+            scores[node.label_map] = np.exp(score - np.square(np.maximum(0, 1 - pred)))
         return scores
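
Both scoring changes above replace np.maximum(0, 1 - pred) ** 2 with np.square(np.maximum(0, 1 - pred)), which computes the same squared-hinge penalty. The substantive fix appears to be np.full(num_labels, 0.0): with the integer literal 0, NumPy infers an integer dtype for the scores array, and the float probabilities later assigned into it are silently truncated toward zero. A minimal sketch of that pitfall (the array length and values are made up for illustration):

import numpy as np

# np.full(n, 0) infers an integer dtype, so floats assigned into the
# array are silently truncated toward zero.
scores = np.full(4, 0)                  # dtype is int64
scores[[0, 2]] = np.exp([-0.25, -1.5])  # exp of negative scores lies in (0, 1)
print(scores)                           # [0 0 0 0] -- the scores are lost

scores = np.full(4, 0.0)                # dtype is float64
scores[[0, 2]] = np.exp([-0.25, -1.5])
print(scores)                           # approx. [0.7788 0. 0.2231 0.]

# The np.square rewrite is behavior-preserving:
pred = np.array([0.3, 1.2, -0.5])
assert np.allclose(np.square(np.maximum(0, 1 - pred)), np.maximum(0, 1 - pred) ** 2)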


@@ -151,14 +151,14 @@ def count(node):
     root.dfs(count)

     model_size = get_estimated_model_size(root)
-    print(f'The estimated tree model size is: {model_size / (1024**3):.3f} GB')
+    print(f"The estimated tree model size is: {model_size / (1024**3):.3f} GB")

     # Calculate the total memory (excluding swap) on the local machine
-    total_memory = psutil.virtual_memory().total
-    print(f'Your system memory is: {total_memory / (1024**3):.3f} GB')
+    total_memory = psutil.virtual_memory().total
+    print(f"Your system memory is: {total_memory / (1024**3):.3f} GB")

-    if (total_memory <= model_size):
-        raise MemoryError(f'Not enough memory to train the model.')
+    if total_memory <= model_size:
+        raise MemoryError(f"Not enough memory to train the model.")

     pbar = tqdm(total=num_nodes, disable=not verbose)
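
For reference, the guard above boils down to comparing the estimated model size against the machine's total physical RAM as reported by psutil. A standalone sketch of the same pattern, with an illustrative function name and example size that are not part of the PR (note that the f-prefix on the MemoryError message in the diff is redundant, since the string has no placeholders):

import psutil

def check_memory(model_size: int) -> None:
    # Total physical memory in bytes; swap is not included.
    total_memory = psutil.virtual_memory().total
    print(f"The estimated tree model size is: {model_size / (1024**3):.3f} GB")
    print(f"Your system memory is: {total_memory / (1024**3):.3f} GB")
    if total_memory <= model_size:
        raise MemoryError("Not enough memory to train the model.")

check_memory(2 * 1024**3)  # hypothetical 2 GB model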

@@ -221,7 +221,7 @@ def get_estimated_model_size(root):

     def collect_stat(node: Node):
         nonlocal total_num_weights
-
+
         if node.isLeaf():
             total_num_weights += len(node.label_map) * node.num_features_used
         else:
@@ -231,7 +231,7 @@ def collect_stat(node: Node):

     # 16 is because when storing sparse matrices, indices (int64) require 8 bytes and floats require 8 bytes
     # Our study showed that among the used features of every binary classification problem, on average no more than 2/3 of weights obtained by the dual coordinate descent method are non-zeros.
-    return total_num_weights * 16 * 2/3
+    return total_num_weights * 16 * 2 / 3


 def _train_node(y: sparse.csr_matrix, x: sparse.csr_matrix, options: str, node: Node):
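
The returned estimate is in bytes: each stored weight in a sparse matrix costs 16 bytes (an 8-byte int64 index plus an 8-byte float value), discounted by the comment's empirical observation that at most about 2/3 of the weights are non-zero. The change in this hunk is purely cosmetic, since 2/3 and 2 / 3 evaluate identically in Python 3. A quick back-of-the-envelope check with a made-up weight count:

total_num_weights = 3_000_000_000  # hypothetical number of trained weights
estimated_bytes = total_num_weights * 16 * 2 / 3  # 16 bytes per stored non-zero
print(f"{estimated_bytes / (1024**3):.3f} GB")  # prints 29.802 GB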