
Commit 9f952b1

fixed pre-commit errors
1 parent 895f428 commit 9f952b1

File tree

3 files changed: +15 -11 lines changed


llm_guard/input_scanners/anonymize_helpers/transformers_recognizer.py

Lines changed: 2 additions & 2 deletions
@@ -139,8 +139,8 @@ def _load_pipeline(
         self.model.pipeline_kwargs["ignore_labels"] = self.ignore_labels

         transformers = cast("transformers", lazy_load_dep("transformers"))
-        self.pipeline = transformers.pipelines.pipeline(
-            "ner",
+        self.pipeline = transformers.pipelines.pipeline(  # type: ignore[arg-type]
+            "ner",  # type: ignore[arg-type]
             model=tf_model,
             tokenizer=tf_tokenizer,
             **self.model.pipeline_kwargs,
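
This change appears to narrow the pre-commit mypy failure rather than silence the whole call: `# type: ignore[arg-type]` suppresses only the arg-type error code, and for a multi-line call the comment has to sit on the exact line mypy reports, which can be an argument line rather than the opening line. A minimal sketch of that pattern, using a hypothetical stand-in function instead of the real transformers.pipelines.pipeline API:

# Illustration only: make_pipeline is a hypothetical stand-in, not llm-guard or transformers code.
from typing import Any

def make_pipeline(task: int, **kwargs: Any) -> dict:
    # Deliberately strict signature so passing a str triggers mypy's arg-type error.
    return {"task": task, **kwargs}

result = make_pipeline(
    # The per-line comment below silences only the arg-type code on that line, nothing else.
    "ner",  # type: ignore[arg-type]
    model="placeholder-model",
)
print(result["task"])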

llm_guard/output_scanners/relevance.py

Lines changed: 11 additions & 7 deletions
@@ -91,16 +91,20 @@ def __init__(
                 ),
             )
             assert model.onnx_path is not None
-            self._model = optimum_onnxruntime.ORTModelForFeatureExtraction.from_pretrained(
-                model.onnx_path,
-                export=False,
-                subfolder=model.onnx_subfolder,
-                file_name=model.onnx_filename,
-                revision=model.onnx_revision,
-                provider=(
+            onnx_kwargs = {
+                "export": False,
+                "subfolder": model.onnx_subfolder,
+                "file_name": model.onnx_filename,
+                "provider": (
                     "CUDAExecutionProvider" if device().type == "cuda" else "CPUExecutionProvider"
                 ),
                 **model.kwargs,
+            }
+            if model.onnx_revision is not None:
+                onnx_kwargs["revision"] = model.onnx_revision
+            self._model = optimum_onnxruntime.ORTModelForFeatureExtraction.from_pretrained(
+                model.onnx_path,
+                **onnx_kwargs,
             )
             LOGGER.debug("Initialized ONNX model", model=model, device=device())
         else:
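
The relevance.py change collects the ONNX loader arguments into an onnx_kwargs dict and adds `revision` only when `model.onnx_revision` is set, so a `None` revision is never forwarded to `from_pretrained`. A minimal sketch of that conditional-kwargs pattern, assuming a hypothetical loader in place of optimum_onnxruntime.ORTModelForFeatureExtraction.from_pretrained:

# Illustration only: fake_from_pretrained and load_feature_extractor are hypothetical names.
from typing import Any, Optional

def fake_from_pretrained(path: str, **kwargs: Any) -> dict:
    # Stand-in loader that just echoes what it was called with.
    return {"path": path, **kwargs}

def load_feature_extractor(onnx_path: str, onnx_revision: Optional[str] = None) -> dict:
    onnx_kwargs: dict = {
        "export": False,
        "provider": "CPUExecutionProvider",
    }
    if onnx_revision is not None:
        # Forward the key only when there is a real value; omit it entirely otherwise.
        onnx_kwargs["revision"] = onnx_revision
    return fake_from_pretrained(onnx_path, **onnx_kwargs)

print(load_feature_extractor("model.onnx"))                        # no "revision" key at all
print(load_feature_extractor("model.onnx", onnx_revision="main"))  # includes "revision": "main"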

llm_guard/util.py

Lines changed: 2 additions & 2 deletions
@@ -198,7 +198,7 @@ def truncate_tokens_head_tail(tokens, max_length=512, head_length=128, tail_leng


 url_pattern = re.compile(
-    r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
+    r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
 )


@@ -225,7 +225,7 @@ def remove_markdown(text):
     clean_text = text
     for pattern in patterns:
         # Use substitution to preserve the text inside ** and *
-        if "([^\*]+)" in pattern:
+        if r"([^\*]+)" in pattern:
             clean_text = re.sub(pattern, r"\1", clean_text)
         else:
             clean_text = re.sub(pattern, "", clean_text)
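
Both util.py edits deal with backslashes that linting can flag. In the URL regex, `\\(` and `\\)` inside a raw-string character class denote a literal backslash plus a parenthesis, so the old class also accepted stray backslashes; `\(` and `\)` keep only the punctuation. In remove_markdown, the `r` prefix leaves the comparison string's value unchanged (`\*` is not a recognized string escape) but avoids Python's invalid-escape-sequence warning, which tools such as flake8 report as W605. A quick demonstration of both points, with simplified patterns rather than the llm-guard ones:

# Illustration only; simplified character classes, not the actual URL regex.
import re

old_class = re.compile(r"[!*\\(\\),]+")   # class contains ! * \ ( ) ,
new_class = re.compile(r"[!*\(\),]+")     # class contains ! * ( ) ,
print(bool(old_class.fullmatch("\\")))    # True: the old class also matched a backslash
print(bool(new_class.fullmatch("\\")))    # False: backslash no longer accepted
print(bool(new_class.fullmatch("(),")))   # True: the listed punctuation still matches

# "\*" written in a normal string and in a raw string hold the same characters;
# only the non-raw form triggers the invalid-escape-sequence warning.
print("([^\\*]+)" == r"([^\*]+)")         # True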

0 commit comments
