
Merge pull request #52 from Aleph-Alpha/support-Python3.7-google-colab
Remove dependency on Literal from typing to support Python 3.7
ahartel authored Sep 23, 2022
2 parents ffb6e14 + dab1809 commit d866e4c
Showing 6 changed files with 39 additions and 8 deletions.
7 changes: 6 additions & 1 deletion .github/workflows/integration.yml
@@ -5,13 +5,18 @@ on: push
 jobs:
   lint-and-test:
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        # We support latest 3.x version and 3.7 because
+        # Google Colab uses 3.7 by default.
+        python-version: [3.7, 3.x]
 
     steps:
       - uses: actions/checkout@v3
       - name: Set up Python
         uses: actions/setup-python@v3
         with:
-          python-version: "3.x"
+          python-version: ${{ matrix.python-version }}
           cache: "pip"
           cache-dependency-path: |
             **/setup.py
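The 3.7 entry in the matrix exists to catch import-time failures like the one this PR fixes. A minimal reproduction, written as a hypothetical standalone file rather than code from this repo:

    # On Python 3.7 this import raises ImportError, because typing.Literal
    # was only added in Python 3.8 (PEP 586). Any module that imports it
    # fails to load at all, which is what broke the client on Google Colab.
    from typing import Literal

    CompressToSize = Literal[128]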
7 changes: 7 additions & 0 deletions Changelog.md
@@ -1,5 +1,12 @@
 # Changelog
 
+## 2.4.1
+
+### Python support
+
+- Minimal supported Python version is now 3.7
+- Previously we only supported version 3.8
+
 ## 2.4.0
 
 ### New feature
23 changes: 21 additions & 2 deletions aleph_alpha_client/aleph_alpha_client.py
@@ -127,8 +127,22 @@ def available_models(self):
         )
         return self._translate_errors(response).json()
 
+    def available_checkpoints(self):
+        """
+        Queries all checkpoints which are currently available.
+        """
+        response = self.get_request(
+            self.host + "checkpoints_available", headers=self.request_headers
+        )
+        return self._translate_errors(response).json()
+
     def tokenize(
-        self, model: Optional[str], prompt: str, tokens: bool = True, token_ids: bool = True, checkpoint: Optional[str] = None
+        self,
+        model: Optional[str],
+        prompt: str,
+        tokens: bool = True,
+        token_ids: bool = True,
+        checkpoint: Optional[str] = None,
     ):
         """
         Tokenizes the given prompt for the given model.
@@ -153,7 +167,12 @@ def tokenize(
         )
         return self._translate_errors(response).json()
 
-    def detokenize(self, model: Optional[str], token_ids: List[int], checkpoint: Optional[str] = None):
+    def detokenize(
+        self,
+        model: Optional[str],
+        token_ids: List[int],
+        checkpoint: Optional[str] = None,
+    ):
         """
         Detokenizes the given tokens.
         """
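A short usage sketch for the new available_checkpoints method, which mirrors the existing available_models. The constructor arguments and host URL below are assumptions based on the rest of the client, not something shown in this diff:

    from aleph_alpha_client import AlephAlphaClient

    # Placeholder credentials; substitute a real API token.
    client = AlephAlphaClient(
        host="https://api.aleph-alpha.com",
        token="YOUR_API_TOKEN",
    )

    # Returns the parsed JSON of GET <host>/checkpoints_available; per the
    # test change below, each entry carries a "name" field.
    for checkpoint in client.available_checkpoints():
        print(checkpoint["name"])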
3 changes: 1 addition & 2 deletions aleph_alpha_client/embedding.py
@@ -3,7 +3,6 @@
     Any,
     Dict,
     List,
-    Literal,
     NamedTuple,
     Optional,
     Tuple,
@@ -113,7 +112,7 @@ class SemanticEmbeddingRequest(NamedTuple):

     prompt: Prompt
     representation: SemanticRepresentation
-    compress_to_size: Optional[Literal[128]] = None
+    compress_to_size: Optional[int] = None
 
 
 class SemanticEmbeddingResponse(NamedTuple):
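Dropping Literal in favor of Optional[int] loses the type-level hint that 128 is the only accepted size. An alternative the PR did not take would be a version-conditional import of the typing_extensions backport; a sketch under that assumption:

    import sys
    from typing import NamedTuple, Optional

    if sys.version_info >= (3, 8):
        from typing import Literal
    else:
        # typing_extensions backports Literal to Python 3.7.
        from typing_extensions import Literal


    class SemanticEmbeddingRequest(NamedTuple):
        # Keeps the 128-only constraint visible to type checkers while
        # still importing cleanly on 3.7.
        compress_to_size: Optional[Literal[128]] = None

The trade-off is a runtime dependency on typing_extensions for 3.7 users, which is presumably why the commit opted for the plain Optional[int] instead.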
2 changes: 1 addition & 1 deletion aleph_alpha_client/version.py
@@ -1 +1 @@
-__version__ = "2.4.0"
+__version__ = "2.4.1"
5 changes: 3 additions & 2 deletions tests/test_qa.py
@@ -80,9 +80,10 @@ def test_qa_with_client(client: AlephAlphaClient):
 def test_qa_with_client_against_checkpoint(
     client: AlephAlphaClient, qa_checkpoint_name: str
 ):
-    model_name = "luminous-extended"
     # given a client
-    assert model_name in map(lambda model: model["name"], client.available_models())
+    assert qa_checkpoint_name in map(
+        lambda checkpoint: checkpoint["name"], client.available_checkpoints()
+    )
 
     # when posting a QA request with explicit parameters
     response = client.qa(
