summary refs log tree commit diff
path: root/models
diff options
context:
space:
mode:
Diffstat (limited to 'models')
-rw-r--r--  models/datatypes.py  3
-rw-r--r--  models/sku_list.py  19
2 files changed, 0 insertions, 22 deletions
diff --git a/models/datatypes.py b/models/datatypes.py
index d15b44e..70613e2 100644
--- a/models/datatypes.py
+++ b/models/datatypes.py
@@ -99,7 +99,6 @@ class CoreModelId(Enum):
# Safety models
llama_guard_3_8b = "Llama-Guard-3-8B"
- prompt_guard_86m = "Prompt-Guard-86M"
llama_guard_2_8b = "Llama-Guard-2-8B"
llama_guard_3_11b_vision = "Llama-Guard-3-11B-Vision"
llama_guard_3_1b = "Llama-Guard-3-1B"
@@ -156,7 +155,6 @@ def model_family(model_id) -> ModelFamily:
return ModelFamily.llama3_2
elif model_id in [
CoreModelId.llama_guard_3_8b,
- CoreModelId.prompt_guard_86m,
CoreModelId.llama_guard_2_8b,
CoreModelId.llama_guard_3_11b_vision,
CoreModelId.llama_guard_3_1b,
@@ -238,7 +236,6 @@ class Model(BaseModel):
return 131072
elif self.core_model_id in [
CoreModelId.llama_guard_3_8b,
- CoreModelId.prompt_guard_86m,
CoreModelId.llama_guard_3_11b_vision,
CoreModelId.llama_guard_3_1b,
]:
diff --git a/models/sku_list.py b/models/sku_list.py
index a45eda5..5cb6d0e 100644
--- a/models/sku_list.py
+++ b/models/sku_list.py
@@ -797,14 +797,6 @@ def safety_models() -> List[Model]:
pth_file_count=1,
),
Model(
- core_model_id=CoreModelId.prompt_guard_86m,
- is_default_variant=True,
- description="Prompt Guard 86M injection safety model",
- huggingface_repo="meta-llama/Prompt-Guard-86M",
- arch_args={},
- pth_file_count=1,
- ),
- Model(
core_model_id=CoreModelId.llama_guard_2_8b,
is_default_variant=True,
description="Llama Guard v2 8b system safety model",
@@ -856,8 +848,6 @@ def llama_meta_net_info(model: Model) -> LlamaDownloadInfo:
folder = "Llama-Guard-3-8B-INT8-HF"
else:
folder = "Llama-Guard-3-8B"
- elif model.core_model_id == CoreModelId.prompt_guard_86m:
- folder = "Prompt-Guard"
elif model.core_model_id == CoreModelId.llama_guard_2_8b:
folder = "llama-guard-2"
else:
@@ -881,15 +871,6 @@ def llama_meta_net_info(model: Model) -> LlamaDownloadInfo:
"model.safetensors.index.json",
]
)
- elif model.core_model_id == CoreModelId.prompt_guard_86m:
- files.extend(
- [
- "model.safetensors",
- "special_tokens_map.json",
- "tokenizer.json",
- "tokenizer_config.json",
- ]
- )
elif (
model.core_model_id == CoreModelId.llama_guard_3_1b
and model.quantization_format == CheckpointQuantizationFormat.int4