text_ml.py
from starlette.requests import Request
from typing import Dict

import ray
from ray import serve
from ray.serve.handle import DeploymentHandle
from transformers import pipeline


@serve.deployment
class Translator:
    def __init__(self):
        # Default to English-to-French translation.
        self.language = "french"
        self.model = pipeline("translation_en_to_fr", model="t5-small")

    def translate(self, text: str) -> str:
        # Run inference and return only the translated string.
        model_output = self.model(text)
        translation = model_output[0]["translation_text"]
        return translation

    def reconfigure(self, config: Dict):
        # Called by Serve when the deployment's user_config changes;
        # swaps the translation pipeline to the requested target language.
        self.language = config.get("language", "french")

        if self.language.lower() == "french":
            self.model = pipeline("translation_en_to_fr", model="t5-small")
        elif self.language.lower() == "german":
            self.model = pipeline("translation_en_to_de", model="t5-small")
        elif self.language.lower() == "romanian":
            self.model = pipeline("translation_en_to_ro", model="t5-small")
        else:
            # Unsupported language: keep the current model.
            pass
@serve.deployment
class Summarizer:
    def __init__(self, translator: DeploymentHandle):
        # Load the summarization model and keep a handle to the
        # downstream Translator deployment.
        self.model = pipeline("summarization", model="t5-small")
        self.translator = translator
        self.min_length = 5
        self.max_length = 15

    def summarize(self, text: str) -> str:
        # Run inference.
        model_output = self.model(
            text, min_length=self.min_length, max_length=self.max_length
        )

        # Post-process the output to return only the summary text.
        summary = model_output[0]["summary_text"]
        return summary

    async def __call__(self, http_request: Request) -> str:
        # The request body is the JSON-encoded English text to summarize.
        english_text: str = await http_request.json()
        summary = self.summarize(english_text)
        # Forward the summary to the Translator deployment and await its result.
        return await self.translator.translate.remote(summary)

    def reconfigure(self, config: Dict):
        # Adjust the summary length bounds from the deployment's user_config.
        self.min_length = config.get("min_length", 5)
        self.max_length = config.get("max_length", 15)


# Compose the two deployments: the Summarizer holds a handle to the Translator.
app = Summarizer.bind(Translator.bind())
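

# ---------------------------------------------------------------------------
# A minimal local smoke test, not part of the original example: it assumes Ray
# Serve, transformers, torch, and the `requests` package are installed and that
# the t5-small weights can be downloaded. `serve.run` deploys `app` behind the
# Serve HTTP proxy on http://127.0.0.1:8000, and the client POSTs the English
# text as a JSON string, matching what Summarizer.__call__ expects. To exercise
# `reconfigure`, you could instead set `user_config` on a deployment (e.g. via
# its .options() or a Serve config file).
if __name__ == "__main__":
    import requests

    serve.run(app)

    english_text = (
        "It was the best of times, it was the worst of times, it was the age "
        "of wisdom, it was the age of foolishness."
    )
    response = requests.post("http://127.0.0.1:8000/", json=english_text)
    print(response.text)  # French translation of the summary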