# pyproject.toml (forked from protectai/llm-guard)
[project]
name = "llm-guard"
description = "LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure."
authors = [
    { name = "Protect AI", email = "[email protected]" }
]
keywords = ["llm", "language model", "security", "adversarial attacks", "prompt injection", "prompt leakage", "PII detection", "self-hardening", "firewall"]
license = { file = "LICENSE" }
readme = "README.md"
dynamic = ["version"]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
]
requires-python = ">=3.9,<3.13"
dependencies = [
"bc-detect-secrets==1.5.17",
"faker>=26.0.0,<31",
"fuzzysearch>=0.7,<0.9",
"json-repair>=0.25.2,<0.31",
"nltk>=3.9.1,<4",
"presidio-analyzer==2.2.354",
"presidio-anonymizer==2.2.354",
"regex==2024.9.11",
"tiktoken>=0.5,<0.8",
"torch>=2.4.0",
"transformers==4.44.2",
"structlog>=24",
"oldest-supported-numpy"
]
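# Installing the base package (e.g. `pip install llm-guard`) resolves the
# pinned runtime stack above; the extras below are opt-in.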

[project.optional-dependencies]
onnxruntime = [
"optimum[onnxruntime]==1.23.1",
]
onnxruntime-gpu = [
"optimum[onnxruntime-gpu]==1.23.1",
]
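# Either extra enables ONNX-accelerated inference via optimum; pick the one
# matching your hardware (the CPU and GPU onnxruntime packages conflict if
# installed together), e.g.:
#   pip install "llm-guard[onnxruntime]"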
docs-dev = [
"mkdocs>=1.6,<2",
"mkdocs-autorefs==1.2.0",
"mkdocs-git-revision-date-localized-plugin>=1.2",
"mkdocs-jupyter>=0.24",
"mkdocs-material>=9.5",
"mkdocs-material-extensions>=1.3",
"mkdocs-swagger-ui-tag>=0.6",
]
dev = [
"llm_guard[docs-dev]",
"autoflake>=2,<3",
"pytest>=8.0.0,<9",
"pytest-cov>=5.0.0,<6",
"pre-commit>=3.8,<5",
"pyright~=1.1.376",
"ruff>=0.6.1,<0.7.0",
]
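# A typical contributor setup is an editable install with the dev extras,
# which also pull in docs-dev via the self-reference above:
#   pip install -e ".[dev]"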

[project.urls]
homepage = "https://github.com/protectai/llm-guard"
documentation = "https://llm-guard.com/"
repository = "https://github.com/protectai/llm-guard"
issues = "https://github.com/protectai/llm-guard/issues"
changelog = "https://llm-guard.com/changelog/"

[tool.setuptools]
packages = {find = {where=["."], include=["llm_guard", "llm_guard.*"]}}
license-files = [
"LICENSE"
]

[tool.setuptools.dynamic]
version = {attr = "llm_guard.version.__version__"}
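# setuptools resolves the version at build time from the attribute named
# above, so llm_guard/version.py is expected to define something like:
#   __version__ = "0.0.0"  # illustrative placeholder, not the actual version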

[tool.setuptools.package-data]
llm_guard = [
"**/*.json"
]

[tool.pytest.ini_options]
addopts = "-p no:warnings"
log_level = "DEBUG"
# The flag below should only be activated in special debug sessions, e.g. when
# a test hangs and we need to see what happened up to that point. There are
# race conditions in how the logging streams are closed during the teardown
# phase, which can cause tests to fail or be "magically" ignored.
log_cli = "False"

[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
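# With this backend declared, any PEP 517 frontend can build the project,
# e.g. (assuming the `build` package is installed):
#   python -m build   # produces the sdist and wheel under dist/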