diff --git a/trustgraph-flow/setup.py b/trustgraph-flow/setup.py
index fa2c250e..cbebc885 100644
--- a/trustgraph-flow/setup.py
+++ b/trustgraph-flow/setup.py
@@ -55,7 +55,7 @@
         "openai",
         "neo4j",
         "tiktoken",
-        "google-generativeai"
+        "google-generativeai",
     ],
     scripts=[
         "scripts/chunker-recursive",
diff --git a/trustgraph-flow/trustgraph/model/text_completion/googleaistudio/llm.py b/trustgraph-flow/trustgraph/model/text_completion/googleaistudio/llm.py
index 395fc2ed..6a6d6b53 100644
--- a/trustgraph-flow/trustgraph/model/text_completion/googleaistudio/llm.py
+++ b/trustgraph-flow/trustgraph/model/text_completion/googleaistudio/llm.py
@@ -74,12 +74,14 @@ def __init__(self, **params):
             "response_mime_type": "text/plain",
         }
 
+        block_level = HarmBlockThreshold.BLOCK_ONLY_HIGH
+
         self.safety_settings={
-            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_ONLY_HIGH,
-            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
-            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
-            HarmCategory.HARM_CATEGORY_DANGEROUS: HarmBlockThreshold.BLOCK_ONLY_HIGH,
-            HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+            HarmCategory.HARM_CATEGORY_HATE_SPEECH: block_level,
+            HarmCategory.HARM_CATEGORY_HARASSMENT: block_level,
+            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: block_level,
+            HarmCategory.HARM_CATEGORY_DANGEROUS: block_level,
+            HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY: block_level,
         }
 
         self.llm = genai.GenerativeModel(
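
Note (not part of the patch): a minimal sketch of how the deduplicated safety settings end up being passed to the google-generativeai client, assuming the constructor call truncated above uses the standard GenerativeModel keyword arguments. The safety_settings dict mirrors the patch; the model name and API key handling are illustrative assumptions, not taken from this repository.

    import google.generativeai as genai
    from google.generativeai.types import HarmCategory, HarmBlockThreshold

    # One shared threshold, as introduced by the patch.
    block_level = HarmBlockThreshold.BLOCK_ONLY_HIGH

    # Category names copied from the patch above.
    safety_settings = {
        HarmCategory.HARM_CATEGORY_HATE_SPEECH: block_level,
        HarmCategory.HARM_CATEGORY_HARASSMENT: block_level,
        HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: block_level,
        HarmCategory.HARM_CATEGORY_DANGEROUS: block_level,
        HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY: block_level,
    }

    genai.configure(api_key="...")  # API key handling elided; assumption
    model = genai.GenerativeModel(
        model_name="gemini-1.5-flash",  # assumed model name
        generation_config={"response_mime_type": "text/plain"},
        safety_settings=safety_settings,
    )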