diff --git a/.github/workflows/update_leaderboard_after_merge.yaml b/.github/workflows/update_leaderboard_after_merge.yaml
index 5011d22..5575075 100644
--- a/.github/workflows/update_leaderboard_after_merge.yaml
+++ b/.github/workflows/update_leaderboard_after_merge.yaml
@@ -34,6 +34,7 @@ jobs:
with:
script: core.setFailed('More than one submissions are not allowed at once.')
+ # TODO(hetul): Get github profile from email https://api.github.com/search/users?q=EMAIL
- if: ${{ (steps.changes.outputs.src == 'true') && (steps.changes.outputs.src_count == 1) }}
name: Get author's name from last commit
id: author_name
@@ -62,8 +63,8 @@ jobs:
filename_without_extension="${filename%.*}" # Remove extension
python -m scripts.leaderboard --github_user="${{ steps.author_name.outputs.author_name }}" --prompt="$filename_without_extension"
- # - if: ${{ (steps.changes.outputs.src == 'true') && (steps.changes.outputs.src_count == 1) }}
- # name: Commit changes
- # uses: EndBug/add-and-commit@v9
- # with:
- # default_author: github_actions
\ No newline at end of file
+ - if: ${{ (steps.changes.outputs.src == 'true') && (steps.changes.outputs.src_count == 1) }}
+ name: Commit changes
+ uses: EndBug/add-and-commit@v9
+ with:
+ default_author: github_actions
\ No newline at end of file
diff --git a/session_2/challenge/leaderboard.md b/session_2/challenge/leaderboard.md
index f2d4db3..629d499 100644
--- a/session_2/challenge/leaderboard.md
+++ b/session_2/challenge/leaderboard.md
@@ -12,15 +12,10 @@ Check [participation guide](how_to_participate.md).
-| Rank | Profile Image | GitHub Username | Solution | Accuracy % |
-|-------:|:------------------------------------------------------------------------------------------------|:-------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------|-------------:|
-| 1 | | [](https://github.com/) | [baseline](https://github.com/infocusp/llm_seminar_series/blob/main/session_2/challenge/submissions/baseline.py) | 100 |
-| 2 | | [hetulvp](https://github.com/hetulvp) | [baseline_copy](https://github.com/infocusp/llm_seminar_series/blob/main/session_2/challenge/submissions/baseline_copy.py) | 100 |
-| 3 | | [New User](https://github.com/new_user) | [New Solution](https://github.com/new_solution) | 99.5 |
-| 4 | | [Username 2](https://github.com/username2) | [Baseline](https://github.com/infocusp/llm_seminar_series/blob/hetul/prompting-leader-board/session_2/challenge/submissions/baseline.py) | 95 |
-| 5 | | [Username 4](https://github.com/username4) | [Baseline](https://github.com/infocusp/llm_seminar_series/blob/hetul/prompting-leader-board/session_2/challenge/submissions/baseline.py) | 95 |
-| 6 | | [Username 3](https://github.com/username3) | [Baseline](https://github.com/infocusp/llm_seminar_series/blob/hetul/prompting-leader-board/session_2/challenge/submissions/baseline.py) | 10 |
-| 7 | | [Username 1](https://github.com/username1) | [Baseline](https://github.com/infocusp/llm_seminar_series/blob/hetul/prompting-leader-board/session_2/challenge/submissions/baseline.py) | 0 |
+| Rank | Name | Solution | Accuracy % |
+|-------:|:-------|:-----------------------------------------------------------------------------------------------------------------|-------------:|
+| 1 | XYZ | [baseline](https://github.com/infocusp/llm_seminar_series/blob/main/session_2/challenge/submissions/baseline.py) | 100 |
+| 2 | Hetul | [baseline](https://github.com/infocusp/llm_seminar_series/blob/main/session_2/challenge/submissions/baseline.py) | 50 |
\ No newline at end of file
diff --git a/session_2/challenge/scripts/leaderboard.py b/session_2/challenge/scripts/leaderboard.py
index ee14299..9101fd3 100644
--- a/session_2/challenge/scripts/leaderboard.py
+++ b/session_2/challenge/scripts/leaderboard.py
@@ -2,7 +2,7 @@
Sample command:
python -m scripts.leaderboard \
- --github_user=your_github_user \
+    --github_name=your_github_name \
--prompt=baseline
"""
@@ -18,15 +18,15 @@
"prompt", None, "Name of the submitted prompt to evaluate."
)
-_GITHUB_USER = flags.DEFINE_string(
- "github_user", None, "Github username to add an entry in leaderboard."
+_GITHUB_NAME = flags.DEFINE_string(
+    "github_name", None, "GitHub name to add an entry in leaderboard."
)
_LEADERBORAD = "leaderboard.md" # current leaderboard
-def generate_leaderboard(prompt_name: str, accuracy: float, github_user: str):
+def generate_leaderboard(prompt_name: str, accuracy: float, github_name: str):
"""Generates leaderboard."""
# Read the markdown table into a DataFrame
with open(_LEADERBORAD, "r") as file:
@@ -39,7 +39,7 @@ def generate_leaderboard(prompt_name: str, accuracy: float, github_user: str):
# Extract rows using regex
rows = re.findall(
- r"\|([^|]+)\|([^|]+)\|([^|]+)\|([^|]+)\|([^|]+)\|", table_content
+ r"\|([^|]+)\|([^|]+)\|([^|]+)\|([^|]+)\|", table_content
)[2:]
# Create a DataFrame from the extracted rows
@@ -47,8 +47,7 @@ def generate_leaderboard(prompt_name: str, accuracy: float, github_user: str):
rows,
columns=[
"Rank",
- "Profile Image",
- "GitHub Username",
+ "Name",
"Solution",
"Accuracy %",
],
@@ -65,9 +64,7 @@ def generate_leaderboard(prompt_name: str, accuracy: float, github_user: str):
repo_url = "https://github.com/infocusp/llm_seminar_series/blob/main/session_2/challenge/submissions"
new_entry = {
"Rank": len(df) + 1,
- "Profile Image": f'',
- "GitHub Username": f"[{github_user}](https://github.com/{github_user})",
+ "Name": github_name,
"Solution": f"[{prompt_name}]({repo_url}/{prompt_name}.py)",
"Accuracy %": accuracy,
}
@@ -75,7 +72,7 @@ def generate_leaderboard(prompt_name: str, accuracy: float, github_user: str):
df = pd.concat([df, pd.DataFrame([new_entry])], ignore_index=True)
# Keep only the highest submission for each user
- highest_indices = df.groupby("GitHub Username")["Accuracy %"].idxmax()
+ highest_indices = df.groupby("Name")["Accuracy %"].idxmax()
df_highest = df.loc[highest_indices]
# Sort the DataFrame by "Accuracy %" column in descending order
@@ -102,19 +99,19 @@ def generate_leaderboard(prompt_name: str, accuracy: float, github_user: str):
logging.info(
"Submission by %s with prompt %s updated in the leaderboard.",
- github_user,
+ github_name,
prompt_name,
)
-def update_leaderboard(prompt_name: str, github_user: str):
+def update_leaderboard(prompt_name: str, github_name: str):
"""Generates a public leaderboard by evaluating given submission."""
sample_dataset = dataset.load_dataset_from_dir(samples_dir="dataset")
acc = evaluate_lib.evaluate(
dataset=sample_dataset, prompt_name=prompt_name
)
generate_leaderboard(
- prompt_name=prompt_name, accuracy=acc, github_user=github_user
+ prompt_name=prompt_name, accuracy=acc, github_name=github_name
)
@@ -124,11 +121,11 @@ def main(argv: Sequence[str]) -> None:
raise app.UsageError("Too many command-line arguments.")
logging.getLogger().setLevel(logging.INFO)
update_leaderboard(
- prompt_name=_PROMPT.value, github_user=_GITHUB_USER.value
+ prompt_name=_PROMPT.value, github_name=_GITHUB_NAME.value
)
if __name__ == "__main__":
flags.mark_flag_as_required("prompt")
- flags.mark_flag_as_required("github_user")
+ flags.mark_flag_as_required("github_name")
app.run(main)
diff --git a/session_2/challenge/submissions/baseline.py b/session_2/challenge/submissions/baseline.py
index c8f6b3e..cad9630 100644
--- a/session_2/challenge/submissions/baseline.py
+++ b/session_2/challenge/submissions/baseline.py
@@ -14,7 +14,7 @@ def build_prompt(self, job_description: str) -> str:
Say "YES" if the given job description is suitable for
a freshers other wise say "NO".
- {job_description}.
+ {job_description}
"""
return prompt.strip()