Upload 44 files
Browse files- .dockerignore +17 -0
- Dockerfile +21 -0
- LICENSE +19 -0
- PROJECT_README.md +101 -0
- README.md +38 -0
- entrypoint.sh +25 -0
- pyproject.toml +75 -0
- src/__init__.py +0 -0
- src/cli.py +72 -0
- src/config.py +34 -0
- src/controllers/__init__.py +1 -0
- src/controllers/cli_controller.py +214 -0
- src/controllers/web_controller.py +167 -0
- src/main.py +90 -0
- src/models/__init__.py +13 -0
- src/models/extractor.py +833 -0
- src/models/field_registry.json +1714 -0
- src/models/gguf_metadata.py +528 -0
- src/models/model_file_extractors.py +44 -0
- src/models/registry.py +535 -0
- src/models/schemas.py +66 -0
- src/models/scoring.py +454 -0
- src/models/service.py +721 -0
- src/schemas/bom-1.6.schema.json +0 -0
- src/schemas/bom-1.7.schema.json +0 -0
- src/schemas/spdx.schema.json +786 -0
- src/static/css/style.css +1288 -0
- src/static/images/cdx.webp +0 -0
- src/static/images/genai_security_project_logo.webp +0 -0
- src/static/js/script.js +116 -0
- src/templates/error.html +51 -0
- src/templates/includes/footer.html +85 -0
- src/templates/includes/header.html +28 -0
- src/templates/index.html +76 -0
- src/templates/result.html +845 -0
- src/utils/__init__.py +3 -0
- src/utils/analytics.py +76 -0
- src/utils/captcha.py +55 -0
- src/utils/cleanup_utils.py +86 -0
- src/utils/formatter.py +25 -0
- src/utils/license_utils.py +129 -0
- src/utils/rate_limiting.py +129 -0
- src/utils/summarizer.py +266 -0
- src/utils/validation.py +158 -0
.dockerignore
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.git
|
| 2 |
+
.github
|
| 3 |
+
.pytest_cache
|
| 4 |
+
.ruff_cache
|
| 5 |
+
.venv
|
| 6 |
+
__pycache__
|
| 7 |
+
*.pyc
|
| 8 |
+
*.pyo
|
| 9 |
+
*.pyd
|
| 10 |
+
*.log
|
| 11 |
+
*.tmp
|
| 12 |
+
*.swp
|
| 13 |
+
.DS_Store
|
| 14 |
+
tests
|
| 15 |
+
docs
|
| 16 |
+
sboms
|
| 17 |
+
output
|
Dockerfile
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Slim Python base image; 3.11 matches pyproject's requires-python floor.
FROM python:3.11-slim

WORKDIR /app

# Unbuffered stdout (live container logs), no .pyc files, no pip cache bloat.
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV PIP_NO_CACHE_DIR=1
# Allow `python -m src.*` to resolve from /app.
ENV PYTHONPATH=/app

# Copy only what the editable install and runtime actually need.
COPY README.md /app/README.md
COPY pyproject.toml /app/pyproject.toml
COPY LICENSE /app/LICENSE
COPY PROJECT_README.md /app/PROJECT_README.md
COPY src /app/src
COPY entrypoint.sh /app/entrypoint.sh

# Editable install pulls dependencies from pyproject.toml.
RUN pip install --upgrade pip \
    && pip install -e . \
    && chmod +x /app/entrypoint.sh

ENTRYPOINT ["/app/entrypoint.sh"]
|
LICENSE
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
Copyright 2026 OWASP Foundation - AI SBOM Generator and contributors
|
| 8 |
+
|
| 9 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 10 |
+
you may not use this file except in compliance with the License.
|
| 11 |
+
You may obtain a copy of the License at
|
| 12 |
+
|
| 13 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 14 |
+
|
| 15 |
+
Unless required by applicable law or agreed to in writing, software
|
| 16 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 17 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 18 |
+
See the License for the specific language governing permissions and
|
| 19 |
+
limitations under the License.
|
PROJECT_README.md
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🤖 OWASP GenAI Security Project - AIBOM Generator
|
| 2 |
+
|
| 3 |
+
This is the official GitHub repository for the **OWASP AIBOM Generator** — an open-source tool for generating **AI Bills of Materials (AIBOMs)** in [CycloneDX](https://cyclonedx.org) format.
|
| 4 |
+
The tool is also listed in the official **[CycloneDX Tool Center](https://cyclonedx.org/tool-center/)**.
|
| 5 |
+
|
| 6 |
+
🚀 **Try the tool live:**
|
| 7 |
+
👉 https://owasp-genai-aibom.org
|
| 8 |
+
🔖 Bookmark and share: https://owasp-genai-aibom.org
|
| 9 |
+
|
| 10 |
+
🌐 OWASP AIBOM Initiative: [genai.owasp.org](https://genai.owasp.org/)
|
| 11 |
+
|
| 12 |
+
> This initiative is about making AI transparency practical. The OWASP AIBOM Generator, running under the OWASP GenAI Security Project, is focused on helping organizations understand what’s actually inside AI models and systems, starting with open models on Hugging Face.
|
| 13 |
+
> Join OWASP GenAI Security Project - AIBOM Initiative to contribute.
|
| 14 |
+
|
| 15 |
+
---
|
| 16 |
+
|
| 17 |
+
## 📦 What It Does
|
| 18 |
+
|
| 19 |
+
- Extracts metadata from models hosted on Hugging Face 🤗
|
| 20 |
+
- Generates an **AIBOM** (AI Bill of Materials) in CycloneDX 1.6 JSON format
|
| 21 |
+
- Calculates **AIBOM completeness scoring** with recommendations
|
| 22 |
+
- Supports metadata extraction from model cards, configurations, and repository files
|
| 23 |
+
|
| 24 |
+
---
|
| 25 |
+
|
| 26 |
+
## 🛠 Features
|
| 27 |
+
|
| 28 |
+
- Human-readable AIBOM viewer
|
| 29 |
+
- JSON download
|
| 30 |
+
- Completeness scoring & improvement tips
|
| 31 |
+
- API endpoints for automation
|
| 32 |
+
- Standards-aligned generation (CycloneDX 1.6, compatible with SPDX AI Profile)
|
| 33 |
+
|
| 34 |
+
---
|
| 35 |
+
|
| 36 |
+
## 📥 Installation & Usage
|
| 37 |
+
|
| 38 |
+
### 1. Install Dependencies
|
| 39 |
+
```bash
|
| 40 |
+
pip install -e .
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
Or, if you prefer [uv](https://docs.astral.sh/uv/) for faster dependency management:
|
| 44 |
+
```bash
|
| 45 |
+
uv sync
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
### 2. Run Web Application
|
| 49 |
+
Start the local server at `http://localhost:8000`:
|
| 50 |
+
```bash
|
| 51 |
+
python3 -m src.main
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
### 3. Run via CLI
|
| 55 |
+
Generate an AIBOM for a Hugging Face model directly from your terminal:
|
| 56 |
+
|
| 57 |
+
**Basic Usage:**
|
| 58 |
+
```bash
|
| 59 |
+
python3 -m src.cli google-bert/bert-base-uncased
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
**Advanced Usage:**
|
| 63 |
+
You can specify additional metadata like component name, version, and supplier.
|
| 64 |
+
```bash
|
| 65 |
+
python3 -m src.cli google-bert/bert-base-uncased \
|
| 66 |
+
--name "My Custom BERT" \
|
| 67 |
+
--version "1.0.0" \
|
| 68 |
+
--manufacturer "Acme Corp" \
|
| 69 |
+
--output "my_sbom.json"
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
**Command Line Options:**
|
| 73 |
+
|
| 74 |
+
| Option | Shorthand | Description |
|
| 75 |
+
|--------|-----------|-------------|
|
| 76 |
+
| `model_id` | | Hugging Face Model ID (e.g., `owner/model`) |
|
| 77 |
+
| `--test` | `-t` | Run test mode for multiple predefined models |
|
| 78 |
+
| `--output` | `-o` | Custom output file path |
|
| 79 |
+
| `--name` | `-n` | Override component name in metadata |
|
| 80 |
+
| `--version` | `-v` | Override component version in metadata |
|
| 81 |
+
| `--manufacturer` | `-m` | Override component manufacturer/supplier |
|
| 82 |
+
| `--inference` | `-i` | Use AI inference for enhanced metadata (requires API key) |
|
| 83 |
+
| `--summarize` | `-s` | Enable intelligent description summarization |
|
| 84 |
+
| `--verbose` | | Enable verbose logging |
|
| 85 |
+
|
| 86 |
+
* Metrics and produced SBOMs are saved to the `sboms/` directory by default.
|
| 87 |
+
|
| 88 |
+
---
|
| 89 |
+
|
| 90 |
+
## 🐞 Found a Bug or Have an Improvement Request?
|
| 91 |
+
|
| 92 |
+
We welcome contributions and feedback.
|
| 93 |
+
|
| 94 |
+
➡ **Log an issue:**
|
| 95 |
+
https://github.com/GenAI-Security-Project/aibom-generator/issues
|
| 96 |
+
|
| 97 |
+
---
|
| 98 |
+
|
| 99 |
+
## 📄 License
|
| 100 |
+
|
| 101 |
+
This project is open-source and available under the [Apache 2.0 License](LICENSE).
|
README.md
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: OWASP AIBOM Generator
|
| 3 |
+
emoji: 🚀
|
| 4 |
+
colorFrom: indigo
|
| 5 |
+
colorTo: green
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
+
pinned: true
|
| 9 |
+
license: apache-2.0
|
| 10 |
+
short_description: OWASP GenAI Security Project - AI Bill of Materials
|
| 11 |
+
thumbnail: >-
|
| 12 |
+
https://cdn-uploads.huggingface.co/production/uploads/666afcef4fcfc38e18cba142/G7x702vfcrrarm6utDQoM.png
|
| 13 |
+
---
|
| 14 |
+
|
| 15 |
+
# OWASP AIBOM Generator
|
| 16 |
+
|
| 17 |
+
This Space runs the existing OWASP AIBOM Generator web application as a Docker
|
| 18 |
+
Space. It generates AI Bills of Materials for Hugging Face-hosted models using
|
| 19 |
+
the same service and business logic as the main project.
|
| 20 |
+
|
| 21 |
+
## Usage
|
| 22 |
+
|
| 23 |
+
1. Enter a Hugging Face model ID or model URL.
|
| 24 |
+
2. Submit the form.
|
| 25 |
+
3. Review the generated AIBOM and download the JSON output.
|
| 26 |
+
|
| 27 |
+
## Runtime notes
|
| 28 |
+
|
| 29 |
+
- Default web startup binds to `0.0.0.0:${PORT:-7860}`.
|
| 30 |
+
- Generated output and Hugging Face caches prefer `/data` when persistent
|
| 31 |
+
storage is available.
|
| 32 |
+
- If `/data` is not available, the container falls back to `/tmp`.
|
| 33 |
+
- `HF_TOKEN` is optional. Without it, analytics logging and private-model access
|
| 34 |
+
may be limited, but the public-model web app still works.
|
| 35 |
+
|
| 36 |
+
## Project
|
| 37 |
+
|
| 38 |
+
The main project documentation is bundled in `PROJECT_README.md`.
|
entrypoint.sh
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/sh
# Container entrypoint: prepares writable cache/output directories, then
# either runs the CLI (when arguments are given) or starts the web server.
set -eu

# Prefer persistent storage (/data, e.g. a Hugging Face Spaces volume) when
# it exists and is writable; otherwise fall back to ephemeral /tmp.
if [ -d "/data" ] && [ -w "/data" ]; then
    CACHE_ROOT="/data/.cache/huggingface"
    OUTPUT_ROOT="/data/aibom_output"
else
    CACHE_ROOT="/tmp/.cache/huggingface"
    OUTPUT_ROOT="/tmp/aibom_output"
fi

mkdir -p "${CACHE_ROOT}" "${OUTPUT_ROOT}"

# Only provide defaults — caller-supplied environment values take precedence.
export HF_HOME="${HF_HOME:-${CACHE_ROOT}}"
export TRANSFORMERS_CACHE="${TRANSFORMERS_CACHE:-${CACHE_ROOT}/transformers}"
export AIBOM_OUTPUT_DIR="${AIBOM_OUTPUT_DIR:-${OUTPUT_ROOT}}"
export PORT="${PORT:-7860}"

mkdir -p "${TRANSFORMERS_CACHE}" "${AIBOM_OUTPUT_DIR}"

# Any CLI arguments mean "run the CLI" instead of serving the web app.
if [ "$#" -gt 0 ]; then
    exec python -m src.cli "$@"
fi

exec uvicorn src.main:app --host 0.0.0.0 --port "${PORT}"
|
pyproject.toml
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
[project]
|
| 3 |
+
name = "owasp-aibom-generator"
|
| 4 |
+
version = "1.0.2"
|
| 5 |
+
description = "A comprehensive AI Bill of Materials (AIBOM) generation tool for Hugging Face models."
|
| 6 |
+
authors = [
|
| 7 |
+
{ name = "OWASP GenAI Security Project", email = "genai-security@owasp.org" }
|
| 8 |
+
]
|
| 9 |
+
readme = "README.md"
|
| 10 |
+
requires-python = ">=3.11"
|
| 11 |
+
license = { text = "Apache-2.0" }
|
| 12 |
+
classifiers = [
|
| 13 |
+
"Programming Language :: Python :: 3",
|
| 14 |
+
"License :: OSI Approved :: Apache Software License",
|
| 15 |
+
"Operating System :: OS Independent",
|
| 16 |
+
"Topic :: Security",
|
| 17 |
+
"Topic :: Scientific/Engineering :: Artificial Intelligence"
|
| 18 |
+
]
|
| 19 |
+
dependencies = [
|
| 20 |
+
"beautifulsoup4>=4.11.0",
|
| 21 |
+
"datasets>=2.0.0",
|
| 22 |
+
"fastapi>=0.104.0",
|
| 23 |
+
"flask>=2.3.0",
|
| 24 |
+
"gunicorn>=21.2.0",
|
| 25 |
+
"httpx>=0.25.0",
|
| 26 |
+
"huggingface_hub>=0.19.0",
|
| 27 |
+
"jinja2>=3.0.0",
|
| 28 |
+
"jsonschema>=4.17.0",
|
| 29 |
+
"license-expression>=30.4.4",
|
| 30 |
+
"packageurl-python>=0.11.1",
|
| 31 |
+
"pydantic>=2.4.0",
|
| 32 |
+
"python-multipart",
|
| 33 |
+
"PyYAML>=6.0.1",
|
| 34 |
+
"requests>=2.31.0",
|
| 35 |
+
"sentencepiece>=0.1.99",
|
| 36 |
+
"torch>=2.0.0",
|
| 37 |
+
"transformers>=4.36.0",
|
| 38 |
+
"uvicorn>=0.24.0",
|
| 39 |
+
]
|
| 40 |
+
|
| 41 |
+
[project.optional-dependencies]
|
| 42 |
+
dev = [
|
| 43 |
+
"pytest>=7.0.0",
|
| 44 |
+
"pytest-cov>=4.0.0",
|
| 45 |
+
"pytest-mock>=3.10.0",
|
| 46 |
+
"ruff",
|
| 47 |
+
"gguf>=0.6.0"
|
| 48 |
+
]
|
| 49 |
+
|
| 50 |
+
[project.scripts]
|
| 51 |
+
aibom = "src.cli:main"
|
| 52 |
+
|
| 53 |
+
[build-system]
|
| 54 |
+
requires = ["setuptools>=61.0", "wheel"]
|
| 55 |
+
build-backend = "setuptools.build_meta"
|
| 56 |
+
|
| 57 |
+
[tool.setuptools.packages.find]
|
| 58 |
+
where = ["."]
|
| 59 |
+
include = ["src*"]
|
| 60 |
+
namespaces = false
|
| 61 |
+
|
| 62 |
+
[tool.pytest.ini_options]
|
| 63 |
+
minversion = "6.0"
|
| 64 |
+
addopts = "-ra -q --cov=src"
|
| 65 |
+
testpaths = [
|
| 66 |
+
"tests",
|
| 67 |
+
]
|
| 68 |
+
pythonpath = [
|
| 69 |
+
"."
|
| 70 |
+
]
|
| 71 |
+
|
| 72 |
+
[dependency-groups]
|
| 73 |
+
dev = [
|
| 74 |
+
"gguf>=0.6.0",
|
| 75 |
+
]
|
src/__init__.py
ADDED
|
File without changes
|
src/cli.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
import sys
from .controllers.cli_controller import CLIController


def main():
    """Entry point for the OWASP AIBOM Generator CLI.

    Parses arguments and either runs a sweep over a predefined model list
    (``--test``) or generates an AIBOM for a single Hugging Face model ID.
    """
    parser = argparse.ArgumentParser(description="OWASP AIBOM Generator CLI")
    parser.add_argument("model_id", nargs="?", help="Hugging Face Model ID (e.g. 'owner/model')")
    parser.add_argument("--test", "-t", action="store_true", help="Run test mode for multiple predefined models to verify description generation")
    parser.add_argument("--output", "-o", help="Output file path")
    parser.add_argument("--inference", "-i", action="store_true", help="Use AI inference for enhanced metadata (requires configured valid endpoint)")
    parser.add_argument("--summarize", "-s", action="store_true", help="Enable intelligent description summarization (requires model download)")
    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
    parser.add_argument("--name", "-n", help="Component name in metadata")
    # NOTE: -v overrides the *component* version in the generated SBOM; it is
    # not the conventional "print program version" flag.
    parser.add_argument("--version", "-v", help="Component version in metadata")
    parser.add_argument("--manufacturer", "-m", help="Component manufacturer/supplier in metadata")

    args = parser.parse_args()

    controller = CLIController()

    if args.test:
        # Representative regression set (GGUF quantizations, gated repos,
        # multimodal models, classifiers, ...) for exercising generation.
        test_models = [
            "Qwen/Qwen3.5-397B-A17B",
            "nvidia/personaplex-7b-v1",
            "meta-llama/Llama-2-7b-chat-hf",
            "unsloth/Qwen3.5-35B-A3B-GGUF",
            "LocoreMind/LocoOperator-4B",
            "Nanbeige/Nanbeige4.1-3B",
            "zai-org/GLM-5",
            "MiniMaxAI/MiniMax-M2.5",
            "unsloth/Qwen3.5-397B-A17B-GGUF",
            "FireRedTeam/FireRed-Image-Edit-1.0",
            "nvidia/NVIDIA-Nemotron-Nano-9B-v2-Japanese",
            "mistralai/Voxtral-Mini-4B-Realtime-2602",
            "TeichAI/GLM-4.7-Flash-Claude-Opus-4.5-High-Reasoning-Distill-GGUF",
            "CIRCL/vulnerability-severity-classification-roberta-base"
        ]

        print(f"Running test mode against {len(test_models)} models...")
        for model in test_models:
            print(f"\n{'='*50}\nTesting model: {model}\n{'='*50}")
            try:
                controller.generate(
                    model_id=model,
                    output_file=args.output,
                    include_inference=args.inference,
                    enable_summarization=True,  # Ensure summarization is on for testing description
                    verbose=args.verbose,
                    name=args.name,
                    version=args.version,
                    manufacturer=args.manufacturer
                )
            except Exception as e:
                # Best-effort sweep: report the failure and continue with the
                # remaining models rather than aborting the whole run.
                print(f"Error testing {model}: {e}")
        sys.exit(0)

    if not args.model_id:
        parser.error("model_id is required unless --test is specified")

    controller.generate(
        model_id=args.model_id,
        output_file=args.output,
        include_inference=args.inference,
        enable_summarization=args.summarize,
        verbose=args.verbose,
        name=args.name,
        version=args.version,
        manufacturer=args.manufacturer
    )


if __name__ == "__main__":
    main()
|
src/config.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
import tomllib
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# Base Directory Setup
|
| 7 |
+
BASE_DIR = Path(__file__).resolve().parent
|
| 8 |
+
OUTPUT_DIR = os.getenv("AIBOM_OUTPUT_DIR") or "/tmp/aibom_output"
|
| 9 |
+
# Ensure absolute path for security
|
| 10 |
+
if not os.path.isabs(OUTPUT_DIR):
|
| 11 |
+
OUTPUT_DIR = os.path.abspath(OUTPUT_DIR)
|
| 12 |
+
|
| 13 |
+
def get_project_metadata() -> tuple[str, str]:
|
| 14 |
+
try:
|
| 15 |
+
pyproject_path = BASE_DIR.parent / "pyproject.toml"
|
| 16 |
+
with open(pyproject_path, "rb") as f:
|
| 17 |
+
data = tomllib.load(f)
|
| 18 |
+
return data["project"]["name"], data["project"]["version"]
|
| 19 |
+
except Exception:
|
| 20 |
+
return "owasp-aibom-generator", "1.0.2"
|
| 21 |
+
|
| 22 |
+
AIBOM_GEN_NAME, AIBOM_GEN_VERSION = get_project_metadata()
|
| 23 |
+
|
| 24 |
+
TEMPLATES_DIR = BASE_DIR / "templates"
|
| 25 |
+
|
| 26 |
+
# Cleanup Configuration
|
| 27 |
+
MAX_AGE_DAYS = 7
|
| 28 |
+
MAX_FILES = 1000
|
| 29 |
+
CLEANUP_INTERVAL = 100
|
| 30 |
+
|
| 31 |
+
# Hugging Face Setup
|
| 32 |
+
HF_REPO = "owasp-genai-security-project/aisbom-usage-log"
|
| 33 |
+
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 34 |
+
RECAPTCHA_SITE_KEY = os.getenv("RECAPTCHA_SITE_KEY")
|
src/controllers/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Controllers package
|
src/controllers/cli_controller.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
import logging
import os
import shutil
from typing import Optional

from ..models.service import AIBOMService
from ..models.scoring import calculate_completeness_score
from ..config import OUTPUT_DIR, TEMPLATES_DIR
from ..utils.formatter import export_aibom

logger = logging.getLogger(__name__)


class CLIController:
    """Drives AIBOM generation for the CLI.

    Produces CycloneDX 1.6 and 1.7 JSON SBOM files, a standalone HTML report
    (with copied static assets), and prints a completeness-score summary.
    """

    def __init__(self):
        self.service = AIBOMService()

    def _validate_spdx_schema_version(self, aibom_data: dict, spec_version: str):
        """
        TODO: Implement SPDX schema validation.
        """
        pass

    def generate(self, model_id: str, output_file: Optional[str] = None, include_inference: bool = False,
                 enable_summarization: bool = False, verbose: bool = False,
                 name: Optional[str] = None, version: Optional[str] = None, manufacturer: Optional[str] = None):
        """Generate CycloneDX 1.6 and 1.7 AIBOMs for *model_id*.

        Writes both JSON files (to *output_file* / a derived sibling path, or
        to ``sboms/`` by default), renders an HTML report next to them, and
        prints schema-validation status plus the completeness-score breakdown.
        Failures are logged and reported on stdout rather than raised.
        """
        if verbose:
            logging.getLogger().setLevel(logging.INFO)

        print(f"Generating AIBOM for {model_id}...")

        reports = []

        print(" - Generating AIBOM model data...")
        try:
            primary_aibom = self.service.generate_aibom(
                model_id,
                include_inference=include_inference,
                enable_summarization=enable_summarization,
                metadata_overrides={
                    "name": name,
                    "version": version,
                    "manufacturer": manufacturer
                }
            )
            primary_report = self.service.get_enhancement_report()

            # Formatted AIBOM Strings (same model data, two spec versions)
            json_1_6 = export_aibom(primary_aibom, bom_type="cyclonedx", spec_version="1.6")
            json_1_7 = export_aibom(primary_aibom, bom_type="cyclonedx", spec_version="1.7")

            # Determine output filenames
            normalized_id = self.service._normalise_model_id(model_id)
            os.makedirs("sboms", exist_ok=True)

            output_file_1_6 = output_file
            if not output_file_1_6:
                output_file_1_6 = os.path.join("sboms", f"{normalized_id.replace('/', '_')}_ai_sbom_1_6.json")

            # Derive the 1.7 filename from the 1.6 one (swap/append the suffix).
            base, ext = os.path.splitext(output_file_1_6)
            output_file_1_7 = f"{base.replace('_1_6', '')}_1_7{ext}" if '_1_6' in base else f"{base}_1_7{ext}"

            with open(output_file_1_6, 'w') as f:
                f.write(json_1_6)
            with open(output_file_1_7, 'w') as f:
                f.write(json_1_7)

            # Check for validation results produced during scoring
            validation_data = primary_report.get("final_score", {}).get("validation", {})
            is_valid = validation_data.get("valid", True)
            validation_errors = [i["message"] for i in validation_data.get("issues", [])]

            if "schema_validation" not in primary_report:
                primary_report["schema_validation"] = {}
            primary_report["schema_validation"]["valid"] = is_valid
            primary_report["schema_validation"]["errors"] = validation_errors
            primary_report["schema_validation"]["error_count"] = len(validation_errors)

            reports = [
                {"spec_version": "1.6", "output_file": output_file_1_6, "schema_validation": primary_report["schema_validation"]},
                {"spec_version": "1.7", "output_file": output_file_1_7, "schema_validation": primary_report["schema_validation"]}
            ]
            output_file_primary = output_file_1_6

        except Exception as e:
            logger.error(f"Failed to generate SBOM: {e}", exc_info=True)
            print(f" ❌ Failed to generate SBOM: {e}")
            reports = []

        if reports:
            if output_file_primary:
                try:
                    # jinja2 imported lazily so the CLI works even if the web
                    # stack's template dependency is unavailable.
                    from jinja2 import Environment, FileSystemLoader, select_autoescape

                    env = Environment(
                        loader=FileSystemLoader(TEMPLATES_DIR),
                        autoescape=select_autoescape(['html', 'xml'])
                    )
                    template = env.get_template("result.html")

                    completeness_score = primary_report.get("final_score")
                    if not completeness_score:
                        completeness_score = calculate_completeness_score(primary_aibom)

                    # Pre-serialize to preserve order
                    components_json = json.dumps(primary_aibom.get("components", []), indent=2)

                    # Mirrors the web controller's template context; request-
                    # specific fields are stubbed for the offline report.
                    context = {
                        "request": None,
                        "filename": os.path.basename(output_file_primary),
                        "download_url": "#",
                        "aibom": primary_aibom,
                        "components_json": components_json,
                        "aibom_cdx_json_1_6": json_1_6,
                        "aibom_cdx_json_1_7": json_1_7,
                        "raw_aibom": primary_aibom,
                        "model_id": self.service._normalise_model_id(model_id),
                        "sbom_count": 0,
                        "completeness_score": completeness_score,
                        "enhancement_report": primary_report or {},
                        "result_file": "#",
                        "static_root": "static"
                    }

                    html_content = template.render(context)
                    html_output_file = output_file_primary.replace("_1_6.json", ".html").replace(".json", ".html")
                    with open(html_output_file, "w") as f:
                        f.write(html_content)

                    print(f"\n📄 HTML Report:\n {html_output_file}")

                    # Copy static assets so the HTML report renders standalone.
                    try:
                        output_dir = os.path.dirname(html_output_file)
                        # Resolve src/static relative to this file so it works
                        # regardless of the CLI's working directory.
                        current_dir = os.path.dirname(os.path.abspath(__file__))  # src/controllers
                        src_dir = os.path.dirname(current_dir)  # src
                        static_src = os.path.join(src_dir, "static")
                        static_dst = os.path.join(output_dir, "static")

                        if os.path.exists(static_src):
                            if os.path.exists(static_dst):
                                shutil.rmtree(static_dst)
                            shutil.copytree(static_src, static_dst)
                        else:
                            logger.warning(f"Static source directory not found: {static_src}")

                    except Exception as e:
                        logger.warning(f"Failed to copy static assets: {e}")

                    # Model Description (truncated for terminal readability)
                    if "components" in primary_aibom and primary_aibom["components"]:
                        description = primary_aibom["components"][0].get("description", "No description available")
                        if len(description) > 256:
                            description = description[:253] + "..."
                        print(f"\n📝 Model Description:\n {description}")

                    # License summary from the primary component
                    if "components" in primary_aibom and primary_aibom["components"]:
                        comp = primary_aibom["components"][0]
                        if "licenses" in comp:
                            license_list = []
                            for l in comp["licenses"]:
                                lic = l.get("license", {})
                                val = lic.get("id") or lic.get("name")
                                if val:
                                    license_list.append(val)
                            if license_list:
                                print(f"\n⚖️ License:\n {', '.join(license_list)}")

                except Exception as e:
                    logger.warning(f"Failed to generate HTML report: {e}")

            # Print Summary for ALL versions
            for r in reports:
                spec = r.get("spec_version", "1.6")
                print(f"\n✅ Successfully generated CycloneDX {spec} SBOM:")
                print(f" {r.get('output_file')}")

                if not r["schema_validation"]["valid"]:
                    print(f"⚠️ Schema Validation Errors ({spec}):")
                    for err in r["schema_validation"]["errors"]:
                        print(f" - {err}")
                else:
                    print(f" - Schema Validation ({spec}): ✅ Valid")

            # Display Detailed Score Summary (from primary)
            if primary_report and "final_score" in primary_report:
                score = primary_report["final_score"]
                t_score = score.get('total_score', 0)
                # Render whole numbers without a trailing ".0".
                formatted_t_score = int(t_score) if isinstance(t_score, (int, float)) and t_score == int(t_score) else t_score
                print(f"\n📊 Completeness Score: {formatted_t_score}/100")

                if "completeness_profile" in score:
                    profile = score["completeness_profile"]
                    print(f" Profile: {profile.get('name')} - {profile.get('description')}")

                if "section_scores" in score:
                    print("\n📋 Section Breakdown:")

                    for section, s_score in score["section_scores"].items():
                        max_s = score.get("max_scores", {}).get(section, "?")
                        formatted_s_score = int(s_score) if isinstance(s_score, (int, float)) and s_score == int(s_score) else s_score
                        print(f" - {section.replace('_', ' ').title()}: {formatted_s_score}/{max_s}")

        else:
            print("\n❌ Failed to generate any SBOMs.")
|
src/controllers/web_controller.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
import html
|
| 6 |
+
from urllib.parse import urlparse
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
from fastapi import APIRouter, Request, Form, HTTPException, Depends
|
| 10 |
+
from fastapi.responses import HTMLResponse, JSONResponse
|
| 11 |
+
from fastapi.templating import Jinja2Templates
|
| 12 |
+
from huggingface_hub import HfApi
|
| 13 |
+
from huggingface_hub.utils import RepositoryNotFoundError
|
| 14 |
+
|
| 15 |
+
from ..models.service import AIBOMService
|
| 16 |
+
from ..models.scoring import calculate_completeness_score
|
| 17 |
+
from ..utils.analytics import log_sbom_generation, get_sbom_count
|
| 18 |
+
from ..utils.formatter import export_aibom
|
| 19 |
+
from ..config import TEMPLATES_DIR, OUTPUT_DIR
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
router = APIRouter()
|
| 24 |
+
templates = Jinja2Templates(directory=TEMPLATES_DIR)
|
| 25 |
+
|
| 26 |
+
# --- Helpers ---
|
| 27 |
+
HF_ID_REGEX = re.compile(r"^[a-zA-Z0-9\.\-\_]+/[a-zA-Z0-9\.\-\_]+$")

def is_valid_hf_input(input_str: str) -> bool:
    """Return True when *input_str* is a plausible Hugging Face model reference.

    Accepts either a bare ``org/model`` id (validated against HF_ID_REGEX)
    or a full ``https://huggingface.co/org/model`` URL. Anything empty,
    longer than 200 characters, or pointing at another host is rejected.
    """
    # Guard clauses: empty input and oversized input are never valid.
    if not input_str or len(input_str) > 200:
        return False

    # Bare id form: one regex decides.
    if not input_str.startswith(("http://", "https://")):
        return bool(HF_ID_REGEX.match(input_str))

    # URL form: must be huggingface.co with at least org + model segments.
    try:
        parsed = urlparse(input_str)
    except Exception:
        return False
    if parsed.netloc != "huggingface.co":
        return False
    segments = parsed.path.strip("/").split("/")
    if len(segments) < 2 or not segments[0] or not segments[1]:
        return False
    segment_ok = r"^[a-zA-Z0-9\.\-\_]+$"
    return bool(re.match(segment_ok, segments[0]) and re.match(segment_ok, segments[1]))
|
| 46 |
+
|
| 47 |
+
# --- Routes ---
|
| 48 |
+
|
| 49 |
+
@router.get("/", response_class=HTMLResponse)
async def root(request: Request):
    """Render the landing page with the running SBOM-generation counter."""
    context = {
        "request": request,
        "sbom_count": get_sbom_count(),
    }
    return templates.TemplateResponse("index.html", context)
|
| 55 |
+
|
| 56 |
+
@router.get("/status")
async def get_status():
    """Report service health plus app and generator version strings."""
    status_payload = {
        "status": "operational",
        "version": "1.0.0",
        "generator_version": "2.0.0",
    }
    return status_payload
|
| 59 |
+
|
| 60 |
+
@router.post("/generate", response_class=HTMLResponse)
async def generate_form(
    request: Request,
    model_id: str = Form(...),
    include_inference: bool = Form(False),
    use_best_practices: bool = Form(True)
):
    """
    Handle the SBOM generation form.

    Flow: validate the raw model id, escape it, normalize it via the
    service helper, verify the model exists on Hugging Face, generate
    CycloneDX 1.6 and 1.7 AIBOMs in a worker thread, persist both JSON
    files under OUTPUT_DIR, and render result.html (or error.html on
    any failure).

    Args:
        request: Incoming request, forwarded to the Jinja templates.
        model_id: Raw user-supplied model id or huggingface.co URL.
        include_inference: Whether inference enrichment is requested.
        use_best_practices: Toggles best-practice mode in AIBOMService.
    """
    import asyncio

    # Security: Validate BEFORE sanitizing to prevent bypass attacks
    # (escaping first could let a malformed id like <script>org/model</script>
    # slip past the format check once its angle brackets are entity-encoded).
    if not is_valid_hf_input(model_id):
        return templates.TemplateResponse("error.html", {
            "request": request,
            "error": "Invalid model ID format.",
            "sbom_count": get_sbom_count(),
            "model_id": html.escape(model_id)
        })

    # Sanitize after validation for safe display/storage
    sanitized_model_id = html.escape(model_id)

    # Use helper from Service to normalize (e.g. URL -> org/model)
    normalized_id = AIBOMService._normalise_model_id(sanitized_model_id)

    # Fix: acquire the loop once, outside any try, so every later
    # run_in_executor call can rely on it being bound.
    loop = asyncio.get_running_loop()

    # Check existence (non-blocking: HfApi call runs in the default executor)
    try:
        await loop.run_in_executor(None, lambda: HfApi().model_info(normalized_id))
    except RepositoryNotFoundError:
        return templates.TemplateResponse("error.html", {
            "request": request,
            "error": f"Model {normalized_id} not found on Hugging Face.",
            "sbom_count": get_sbom_count(),
            "model_id": normalized_id
        })
    except Exception as e:
        return templates.TemplateResponse("error.html", {
            "request": request,
            "error": f"Error verifying model: {e}",
            "sbom_count": get_sbom_count(),
            "model_id": normalized_id
        })

    # Generate (non-blocking: generation is CPU/network heavy)
    try:
        def _generate_task():
            # Runs in a worker thread; returns the service so the
            # enhancement report stays attached to the same instance.
            service = AIBOMService(use_best_practices=use_best_practices)
            aibom = service.generate_aibom(sanitized_model_id, include_inference=include_inference)
            report = service.get_enhancement_report()
            return service, aibom, report

        service, aibom, report = await loop.run_in_executor(None, _generate_task)

        # Save file (non-blocking I/O)
        filename = f"{normalized_id.replace('/', '_')}_ai_sbom_1_6.json"
        filepath = os.path.join(OUTPUT_DIR, filename)
        filepath_1_7 = os.path.join(OUTPUT_DIR, f"{normalized_id.replace('/', '_')}_ai_sbom_1_7.json")

        def _save_task():
            # Generate formatted JSON strings for both CycloneDX spec versions.
            json_1_6 = export_aibom(aibom, bom_type="cyclonedx", spec_version="1.6")
            json_1_7 = export_aibom(aibom, bom_type="cyclonedx", spec_version="1.7")

            # Fix: write with an explicit encoding so output is not
            # platform-locale dependent (SBOMs may contain non-ASCII).
            with open(filepath, "w", encoding="utf-8") as f:
                f.write(json_1_6)
            with open(filepath_1_7, "w", encoding="utf-8") as f:
                f.write(json_1_7)
            log_sbom_generation(sanitized_model_id)
            return json_1_6, json_1_7

        json_1_6, json_1_7 = await loop.run_in_executor(None, _save_task)

        # Extract score from the enhancement report when present
        completeness_score = None
        if report and "final_score" in report:
            completeness_score = report["final_score"]

        # Fallback score if the report did not provide one
        if not completeness_score:
            completeness_score = calculate_completeness_score(aibom)

        # Prepare context for template.
        # Fix: restore the filename placeholder in the download URLs
        # (the f-string had been garbled to a literal "(unknown)").
        context = {
            "request": request,
            "filename": filename,
            "download_url": f"/output/{filename}",
            "aibom": aibom,
            "aibom_cdx_json_1_6": json_1_6,
            "aibom_cdx_json_1_7": json_1_7,
            "components_json": json.dumps(aibom.get("components", []), indent=2),
            "model_id": normalized_id,
            "sbom_count": get_sbom_count(),
            "completeness_score": completeness_score,
            "enhancement_report": report or {},
            # Pass legacy variables for template compatibility if needed
            "result_file": f"/output/{filename}"
        }

        return templates.TemplateResponse("result.html", context)

    except Exception as e:
        logger.error(f"Generation error: {e}", exc_info=True)
        return templates.TemplateResponse("error.html", {
            "request": request,
            "error": f"Internal generation error: {e}",
            "sbom_count": get_sbom_count(),
            "model_id": normalized_id
        })
|
src/main.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
from contextlib import asynccontextmanager
|
| 5 |
+
|
| 6 |
+
from fastapi import FastAPI, Request
|
| 7 |
+
from fastapi.staticfiles import StaticFiles
|
| 8 |
+
from fastapi.responses import JSONResponse
|
| 9 |
+
|
| 10 |
+
from .config import OUTPUT_DIR, MAX_AGE_DAYS, MAX_FILES, CLEANUP_INTERVAL
|
| 11 |
+
from .controllers.web_controller import router as web_router
|
| 12 |
+
from .utils import RateLimitMiddleware, ConcurrencyLimitMiddleware, RequestSizeLimitMiddleware, perform_cleanup
|
| 13 |
+
|
| 14 |
+
# Ensure registry is initialized
|
| 15 |
+
from .models import get_field_registry_manager
|
| 16 |
+
|
| 17 |
+
logging.basicConfig(level=logging.INFO)
|
| 18 |
+
logger = logging.getLogger("aibom_generator")
|
| 19 |
+
|
| 20 |
+
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan hook.

    On startup: warm up the field registry and prune stale files from the
    output directory. Both steps are best-effort — a failure is logged but
    never blocks the app from serving. No shutdown work is needed yet.
    """
    # Startup
    logger.info("Starting AI SBOM Generator WebApp")
    try:
        get_field_registry_manager()  # Ensure registry is loaded
        logger.info("Registry loaded successfully")
    except Exception as e:
        # Non-fatal: the extractor has a legacy (non-registry) fallback path.
        logger.error(f"Failed to load registry: {e}")

    # Initial cleanup
    try:
        perform_cleanup(OUTPUT_DIR, MAX_AGE_DAYS, MAX_FILES)
    except Exception as e:
        # Non-fatal: the per-request cleanup middleware will retry periodically.
        logger.warning(f"Initial cleanup failed: {e}")

    yield
    # Shutdown (if needed)
|
| 38 |
+
|
| 39 |
+
# Create the app; `lifespan` performs registry warm-up and initial cleanup.
app = FastAPI(title="AI SBOM Generator", lifespan=lifespan)

# --- Middleware ---
# Throttle the expensive /generate endpoint: 10 requests per 60-second window.
app.add_middleware(
    RateLimitMiddleware,
    rate_limit_per_minute=10,
    rate_limit_window=60,
    protected_routes=["/generate"]
)
# Cap concurrent /generate requests at 5.
# NOTE(review): `timeout` presumably bounds the wait for a free slot —
# confirm against utils.rate_limiting's ConcurrencyLimitMiddleware.
app.add_middleware(
    ConcurrencyLimitMiddleware,
    max_concurrent_requests=5,
    timeout=5.0,
    protected_routes=["/generate"]
)
# Reject request bodies larger than 1 MiB on all routes.
app.add_middleware(
    RequestSizeLimitMiddleware,
    max_content_length=1024*1024  # 1MB
)
|
| 58 |
+
|
| 59 |
+
# --- Cleanup Middleware ---
|
| 60 |
+
# Process-wide request tally that drives the periodic cleanup below.
request_counter = 0

@app.middleware("http")
async def cleanup_middleware(request: Request, call_next):
    """Count every HTTP request and prune the output directory once per
    CLEANUP_INTERVAL requests; cleanup failures are logged, never raised."""
    global request_counter
    request_counter += 1

    due_for_cleanup = request_counter % CLEANUP_INTERVAL == 0
    if due_for_cleanup:
        try:
            removed = perform_cleanup(OUTPUT_DIR, MAX_AGE_DAYS, MAX_FILES)
            logger.info(f"Scheduled cleanup removed {removed} files")
        except Exception as e:
            logger.error(f"Error during scheduled cleanup: {e}")

    return await call_next(request)
|
| 75 |
+
|
| 76 |
+
# --- Static Files ---
# Serve the generated SBOM JSON files straight from the output directory.
os.makedirs(OUTPUT_DIR, exist_ok=True)
app.mount("/output", StaticFiles(directory=OUTPUT_DIR), name="output")
# Mount static files (CSS/JS)
# NOTE(review): the relative "src/static" path assumes the process is
# launched from the repository root — confirm against entrypoint.sh.
os.makedirs("src/static", exist_ok=True)
app.mount("/static", StaticFiles(directory="src/static"), name="static")

# --- Routes ---
app.include_router(web_router)

if __name__ == "__main__":
    import uvicorn
    # Print clear access URL to avoid 0.0.0.0 confusion
    print("🚀 Application ready! Access it at: http://localhost:8000")
    uvicorn.run("src.main:app", host="0.0.0.0", port=8000, reload=True)
|
src/models/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .schemas import (
|
| 2 |
+
DataSource,
|
| 3 |
+
ConfidenceLevel,
|
| 4 |
+
ExtractionResult,
|
| 5 |
+
GenerateRequest,
|
| 6 |
+
BatchRequest,
|
| 7 |
+
AIBOMResponse,
|
| 8 |
+
EnhancementReport
|
| 9 |
+
)
|
| 10 |
+
from .registry import get_field_registry_manager
|
| 11 |
+
from .extractor import EnhancedExtractor
|
| 12 |
+
from .scoring import calculate_completeness_score, validate_aibom
|
| 13 |
+
from .service import AIBOMService
|
src/models/extractor.py
ADDED
|
@@ -0,0 +1,833 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import re
|
| 5 |
+
import yaml
|
| 6 |
+
import json
|
| 7 |
+
from typing import Dict, Any, Optional, List, Union
|
| 8 |
+
from enum import Enum
|
| 9 |
+
from urllib.parse import urlparse, urljoin
|
| 10 |
+
|
| 11 |
+
from huggingface_hub import HfApi, ModelCard, hf_hub_download
|
| 12 |
+
from huggingface_hub.utils import RepositoryNotFoundError, EntryNotFoundError
|
| 13 |
+
|
| 14 |
+
from .schemas import DataSource, ConfidenceLevel, ExtractionResult
|
| 15 |
+
from .registry import get_field_registry_manager
|
| 16 |
+
from .model_file_extractors import ModelFileExtractor, default_extractors
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
class EnhancedExtractor:
|
| 21 |
+
"""
|
| 22 |
+
Registry-integrated enhanced extractor that automatically picks up new fields
|
| 23 |
+
from the JSON registry (field_registry.json) without requiring code changes.
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
# SPDX mappings for common licences
|
| 27 |
+
LICENSE_MAPPINGS = {
|
| 28 |
+
"mit": "MIT",
|
| 29 |
+
"mit license": "MIT",
|
| 30 |
+
"apache license version 2.0": "Apache-2.0",
|
| 31 |
+
"apache license 2.0": "Apache-2.0",
|
| 32 |
+
"apache 2.0": "Apache-2.0",
|
| 33 |
+
"apache license, version 2.0": "Apache-2.0",
|
| 34 |
+
"bsd 3-clause": "BSD-3-Clause",
|
| 35 |
+
"bsd-3-clause": "BSD-3-Clause",
|
| 36 |
+
"bsd 2-clause": "BSD-2-Clause",
|
| 37 |
+
"bsd-2-clause": "BSD-2-Clause",
|
| 38 |
+
"gnu general public license v3": "GPL-3.0-only",
|
| 39 |
+
"gplv3": "GPL-3.0-only",
|
| 40 |
+
"gnu general public license v2": "GPL-2.0-only",
|
| 41 |
+
"gplv2": "GPL-2.0-only",
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
# NOTE(review): this __init__ is dead code — a second `def __init__`
# (the variant that also accepts `model_file_extractors`) is defined
# later in this same class, and Python keeps only the last definition.
# This one never runs and should be deleted in a follow-up change.
def __init__(self, hf_api: Optional[HfApi] = None):
    """
    Initialize the enhanced extractor with registry integration.

    Args:
        hf_api: Optional HuggingFace API instance (will create if not provided)
    """
    # Hub client used for all API lookups.
    self.hf_api = hf_api or HfApi()
    # Per-run provenance: field name -> ExtractionResult.
    self.extraction_results = {}

    # Initialize registry manager
    try:
        self.registry_manager = get_field_registry_manager()
        logger.info("✅ Registry manager initialized successfully")
    except Exception as e:
        # Registry is optional; extraction falls back to the legacy path.
        logger.warning(f"⚠️ Could not initialize registry manager: {e}")
        self.registry_manager = None

    # Load registry fields
    self.registry_fields = {}
    if self.registry_manager:
        try:
            self.registry_fields = self.registry_manager.get_field_definitions()
            logger.info(f"✅ Loaded {len(self.registry_fields)} fields from registry")
        except Exception as e:
            logger.error(f"❌ Error loading registry fields: {e}")
            self.registry_fields = {}
|
| 71 |
+
|
| 72 |
+
# Compiled regex patterns for text extraction
|
| 73 |
+
# Moved to class level to avoid recompilation on every request
|
| 74 |
+
PATTERNS = {
    # Licence declarations in README prose, including markdown-link forms
    # such as "governed by the [Some License](...)".
    'license': [
        re.compile(r'license[:\s]+([a-zA-Z0-9\-\.\s\n]+)', re.IGNORECASE | re.DOTALL),
        re.compile(r'licensed under[:\s]+([a-zA-Z0-9\-\.\s\n]+)', re.IGNORECASE | re.DOTALL),
        # Robust capture for markdown links [License Name](...)
        re.compile(r'governed by[:\s]+(?:the\s+)?\[([^\]]+)\]', re.IGNORECASE | re.DOTALL),
        re.compile(r'governed by[:\s]+(?:the\s+)?([a-zA-Z0-9\-\.\s\n]+)', re.IGNORECASE | re.DOTALL),
        re.compile(r'governed by the[:\s]+\[([^\]]+)\]', re.IGNORECASE | re.DOTALL),
    ],
    # Training-dataset mentions, e.g. "trained on wikitext".
    'datasets': [
        re.compile(r'trained on[:\s]+([a-zA-Z0-9\-\_\/]+)', re.IGNORECASE),
        re.compile(r'dataset[:\s]+([a-zA-Z0-9\-\_\/]+)', re.IGNORECASE),
        re.compile(r'using[:\s]+([a-zA-Z0-9\-\_\/]+)\s+dataset', re.IGNORECASE),
    ],
    # Metric name/value pairs, e.g. "accuracy: 0.92".
    'metrics': [
        re.compile(r'([a-zA-Z]+)[:\s]+([0-9\.]+)', re.IGNORECASE),
        re.compile(r'achieves[:\s]+([0-9\.]+)[:\s]+([a-zA-Z]+)', re.IGNORECASE),
    ],
    # Architecture / model-type statements.
    'model_type': [
        re.compile(r'model type[:\s]+([a-zA-Z0-9\-]+)', re.IGNORECASE),
        re.compile(r'architecture[:\s]+([a-zA-Z0-9\-]+)', re.IGNORECASE),
    ],
    # Energy / power figures with a trailing unit token.
    'energy': [
        re.compile(r'energy[:\s]+([0-9\.]+)\s*([a-zA-Z]+)', re.IGNORECASE),
        re.compile(r'power[:\s]+([0-9\.]+)\s*([a-zA-Z]+)', re.IGNORECASE),
        re.compile(r'consumption[:\s]+([0-9\.]+)\s*([a-zA-Z]+)', re.IGNORECASE),
    ],
    # Stated limitations / known issues (captures up to the next period).
    'limitations': [
        re.compile(r'limitation[s]?[:\s]+([^\.]+)', re.IGNORECASE),
        re.compile(r'known issue[s]?[:\s]+([^\.]+)', re.IGNORECASE),
        re.compile(r'constraint[s]?[:\s]+([^\.]+)', re.IGNORECASE),
    ],
    # Safety / risk / bias statements.
    'safety': [
        re.compile(r'safety[:\s]+([^\.]+)', re.IGNORECASE),
        re.compile(r'risk[s]?[:\s]+([^\.]+)', re.IGNORECASE),
        re.compile(r'bias[:\s]+([^\.]+)', re.IGNORECASE),
    ]
}
|
| 112 |
+
|
| 113 |
+
def __init__(
    self,
    hf_api: Optional[HfApi] = None,
    model_file_extractors: Optional[List[ModelFileExtractor]] = None,
):
    """Wire up the Hub client, the model-file extractor chain, and the
    JSON field registry. Registry failures are tolerated: the extractor
    then runs in legacy (non-registry-driven) mode."""
    self.hf_api = hf_api or HfApi()
    self.extraction_results = {}

    # Caller-supplied extractor chain wins; otherwise use the defaults.
    if model_file_extractors is None:
        self.model_file_extractors = default_extractors()
    else:
        self.model_file_extractors = model_file_extractors

    # Registry manager is optional infrastructure.
    try:
        self.registry_manager = get_field_registry_manager()
        logger.info("✅ Registry manager initialized successfully")
    except Exception as e:
        logger.warning(f"⚠️ Could not initialize registry manager: {e}")
        self.registry_manager = None

    # Field definitions drive registry mode; empty dict means legacy mode.
    self.registry_fields = {}
    if self.registry_manager:
        try:
            self.registry_fields = self.registry_manager.get_field_definitions()
            logger.info(f"✅ Loaded {len(self.registry_fields)} fields from registry")
        except Exception as e:
            logger.error(f"❌ Error loading registry fields: {e}")
            self.registry_fields = {}

    logger.info(f"Enhanced extractor initialized (registry-driven: {bool(self.registry_fields)})")
|
| 144 |
+
|
| 145 |
+
# def _compile_patterns(self): - Removed
|
| 146 |
+
# ...
|
| 147 |
+
|
| 148 |
+
def _detect_license_from_file(self, model_id: str) -> Optional[str]:
    """
    Attempt to detect a licence by looking at repository files.

    Downloads common licence filenames (e.g. LICENSE, LICENSE.md),
    reads a small snippet, and returns the matching SPDX identifier
    from LICENSE_MAPPINGS, or None if none match.

    Args:
        model_id: Hugging Face repo id (``org/model``).
    """
    license_filenames = ["LICENSE", "LICENSE.txt", "LICENSE.md", "LICENSE.rst", "COPYING"]
    for filename in license_filenames:
        try:
            file_path = hf_hub_download(repo_id=model_id, filename=filename)
            with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
                # The first 4 KiB is enough to identify a licence header.
                snippet = f.read(4096).lower()
            for header, spdx_id in self.LICENSE_MAPPINGS.items():
                if header in snippet:
                    return spdx_id
        except (RepositoryNotFoundError, EntryNotFoundError):
            # File doesn't exist in the repo; try the next candidate name.
            continue
        except Exception as e:
            # Fix: log the actual filename (the placeholder had been
            # garbled to a literal "(unknown)").
            logger.debug(f"Licence detection error reading {filename}: {e}")
            continue
    return None
|
| 171 |
+
|
| 172 |
+
def extract_metadata(self, model_id: str, model_info: Dict[str, Any], model_card: Optional[ModelCard], enable_summarization: bool = False) -> Dict[str, Any]:
    """
    Main extraction entry point.

    Resets per-run provenance, then runs either the registry-driven
    pipeline (when field definitions were loaded) or the legacy
    pipeline, and returns only the fields that produced a value.
    """
    logger.info(f"🚀 Starting registry-driven extraction for model: {model_id}")

    # Fresh provenance for this run.
    self.extraction_results = {}

    if self.registry_fields:
        logger.info(f"📋 Registry-driven mode: Attempting extraction for {len(self.registry_fields)} fields")
        extracted = self._registry_driven_extraction(model_id, model_info, model_card, enable_summarization)
    else:
        logger.warning("⚠️ Registry not available, falling back to legacy extraction")
        extracted = self._legacy_extraction(model_id, model_info, model_card)

    # Strip fields whose extraction produced no value.
    return {name: val for name, val in extracted.items() if val is not None}
|
| 193 |
+
|
| 194 |
+
def _registry_driven_extraction(self, model_id: str, model_info: Dict[str, Any], model_card: Optional[ModelCard], enable_summarization: bool = False) -> Dict[str, Any]:
    """
    Registry-driven extraction that automatically processes all registry fields.

    Builds one shared extraction context (README text plus parsed config
    files), runs `_extract_registry_field` per registry field, then layers
    on model-file header metadata, the commit SHA, and external references.
    """
    metadata = {}

    # Prepare extraction context shared by every field's strategies.
    extraction_context = {
        'model_id': model_id,
        'model_info': model_info,
        'model_card': model_card,
        'readme_content': self._get_readme_content(model_card, model_id),
        'config_data': self._download_and_parse_config(model_id, "config.json"),
        'tokenizer_config': self._download_and_parse_config(model_id, "tokenizer_config.json"),
        'enable_summarization': enable_summarization
    }

    # Process each field from the registry; a failure in one field never
    # aborts the others.
    successful_extractions = 0
    failed_extractions = 0

    for field_name, field_config in self.registry_fields.items():
        try:
            logger.info(f"🔍 Attempting extraction for field: {field_name}")

            # Extract field using registry configuration
            extracted_value = self._extract_registry_field(field_name, field_config, extraction_context)

            if extracted_value is not None:
                metadata[field_name] = extracted_value
                successful_extractions += 1
            else:
                failed_extractions += 1

        except Exception as e:
            failed_extractions += 1
            logger.error(f"❌ Error extracting {field_name}: {e}")
            continue

    logger.info(f"📊 Registry extraction complete: {successful_extractions} successful, {failed_extractions} failed")

    # Model-file headers (e.g. GGUF metadata) override registry-derived
    # values and are recorded as high-confidence repository facts.
    model_file_metadata = self._extract_model_file_metadata(model_id)
    if model_file_metadata:
        for key, value in model_file_metadata.items():
            if value is not None:
                metadata[key] = value
                self.extraction_results[key] = ExtractionResult(
                    value=value,
                    source=DataSource.REPOSITORY_FILES,
                    confidence=ConfidenceLevel.HIGH,
                    extraction_method="model_file_header",
                )

    # Always extract commit SHA if available (vital for BOM versioning)
    if 'commit' not in metadata:
        commit_sha = getattr(model_info, 'sha', None)
        if commit_sha:
            metadata['commit'] = commit_sha

    # Add external references (always needed)
    metadata.update(self._generate_external_references(model_id, metadata))

    return metadata
|
| 257 |
+
|
| 258 |
+
def _extract_model_file_metadata(self, model_id: str) -> Dict[str, Any]:
    """Return metadata from the first model-file extractor that succeeds.

    Walks the configured extractor chain in order; an extractor that
    raises is logged and skipped. Returns an empty dict when no
    extractor produces anything.
    """
    for extractor in self.model_file_extractors:
        try:
            if not extractor.can_extract(model_id):
                continue
            extracted = extractor.extract_metadata(model_id)
            if extracted:
                logger.info(
                    f"{type(extractor).__name__} returned {len(extracted)} fields"
                )
                return extracted
        except Exception as e:
            logger.warning(
                f"Model file extraction failed ({type(extractor).__name__}): {e}"
            )
            continue
    return {}
|
| 274 |
+
|
| 275 |
+
def _extract_registry_field(self, field_name: str, field_config: Dict[str, Any], context: Dict[str, Any]) -> Any:
    """
    Extract a single field based on its registry configuration.

    Strategies are tried from most to least reliable: HF API -> model-card
    YAML -> config files -> README text patterns -> intelligent inference ->
    field-specific fallbacks (licence file scan, README summarization) ->
    registry-configured fallback value. Every outcome is recorded in
    ``self.extraction_results`` with its source, confidence, and the chain
    of strategies that were attempted first.
    """
    # Fix: this list used to stay empty forever although it was passed as
    # `fallback_chain`; it now records each strategy that came up empty.
    extraction_methods: List[str] = []
    # (Removed a leftover `logger.warning(f"DEBUG: Extracting license...")`
    # debug statement — wrong level and an f-string with no placeholders.)

    # Strategy 1: Direct API extraction
    api_value = self._try_api_extraction(field_name, context)
    if api_value is not None:
        self.extraction_results[field_name] = ExtractionResult(
            value=api_value,
            source=DataSource.HF_API,
            confidence=ConfidenceLevel.HIGH,
            extraction_method="api_direct"
        )
        return api_value
    extraction_methods.append("api_direct")

    # Strategy 2: Model card YAML extraction
    yaml_value = self._try_model_card_extraction(field_name, context)
    if yaml_value is not None:
        self.extraction_results[field_name] = ExtractionResult(
            value=yaml_value,
            source=DataSource.MODEL_CARD,
            confidence=ConfidenceLevel.HIGH,
            extraction_method="model_card_yaml"
        )
        return yaml_value
    extraction_methods.append("model_card_yaml")

    # Strategy 3: Configuration file extraction
    config_value = self._try_config_extraction(field_name, context)
    if config_value is not None:
        self.extraction_results[field_name] = ExtractionResult(
            value=config_value,
            source=DataSource.CONFIG_FILE,
            confidence=ConfidenceLevel.HIGH,
            extraction_method="config_file"
        )
        return config_value
    extraction_methods.append("config_file")

    # Strategy 4: Text pattern extraction (README prose; lower confidence)
    text_value = self._try_text_pattern_extraction(field_name, context)
    if text_value is not None:
        self.extraction_results[field_name] = ExtractionResult(
            value=text_value,
            source=DataSource.README_TEXT,
            confidence=ConfidenceLevel.MEDIUM,
            extraction_method="text_pattern"
        )
        return text_value
    extraction_methods.append("text_pattern")

    # Strategy 5: Intelligent inference
    inferred_value = self._try_intelligent_inference(field_name, context)
    if inferred_value is not None:
        self.extraction_results[field_name] = ExtractionResult(
            value=inferred_value,
            source=DataSource.INTELLIGENT_DEFAULT,
            confidence=ConfidenceLevel.MEDIUM,
            extraction_method="intelligent_inference"
        )
        return inferred_value
    extraction_methods.append("intelligent_inference")

    # Field-specific fallback: detect licence from repository files.
    if field_name in {"license", "licenses"}:
        detected = self._detect_license_from_file(context["model_id"])
        if detected:
            self.extraction_results[field_name] = ExtractionResult(
                value=detected,
                source=DataSource.REPOSITORY_FILES,
                confidence=ConfidenceLevel.MEDIUM,
                extraction_method="license_file",
                fallback_chain=extraction_methods,
            )
            return detected

    # Field-specific fallback: summarize the README into a description,
    # only when the description is missing AND summarization is enabled.
    if field_name == "description" and context.get('enable_summarization', False):
        try:
            from ..utils.summarizer import LocalSummarizer
            readme = context.get('readme_content')
            if readme:
                summary = LocalSummarizer.summarize(readme, model_id=context.get('model_id', ''))
                if summary:
                    self.extraction_results[field_name] = ExtractionResult(
                        value=summary,
                        source=DataSource.INTELLIGENT_DEFAULT,
                        confidence=ConfidenceLevel.MEDIUM,
                        extraction_method="llm_summarization",
                        fallback_chain=extraction_methods
                    )
                    return summary
        except ImportError:
            # Summarizer is an optional dependency.
            pass
        except Exception as e:
            logger.debug(f"Summarization processing failed: {e}")

    # Strategy 6: Fallback value (if configured)
    fallback_value = self._try_fallback_value(field_name, field_config)
    if fallback_value is not None:
        self.extraction_results[field_name] = ExtractionResult(
            value=fallback_value,
            source=DataSource.PLACEHOLDER,
            confidence=ConfidenceLevel.NONE,
            extraction_method="fallback_placeholder",
            fallback_chain=extraction_methods
        )
        return fallback_value

    # No extraction successful
    self.extraction_results[field_name] = ExtractionResult(
        value=None,
        source=DataSource.PLACEHOLDER,
        confidence=ConfidenceLevel.NONE,
        extraction_method="extraction_failed",
        fallback_chain=extraction_methods
    )
    return None
|
| 396 |
+
|
| 397 |
+
def _extract_paper_link(self, info: Any) -> Union[str, List[str], None]:
|
| 398 |
+
# 1. Check card_data for explicit paper field
|
| 399 |
+
if hasattr(info, 'card_data') and info.card_data:
|
| 400 |
+
paper = getattr(info.card_data, 'paper', None)
|
| 401 |
+
if paper:
|
| 402 |
+
return paper
|
| 403 |
+
|
| 404 |
+
# 2. Check tags for arxiv: ID
|
| 405 |
+
papers = []
|
| 406 |
+
if hasattr(info, 'tags') and info.tags:
|
| 407 |
+
for tag in info.tags:
|
| 408 |
+
if isinstance(tag, str) and tag.startswith('arxiv:'):
|
| 409 |
+
papers.append(f"https://arxiv.org/abs/{tag.split(':', 1)[1]}")
|
| 410 |
+
|
| 411 |
+
return papers if papers else None
|
| 412 |
+
|
| 413 |
+
def _try_api_extraction(self, field_name: str, context: Dict[str, Any]) -> Any:
|
| 414 |
+
"""Try to extract field from HuggingFace API data"""
|
| 415 |
+
model_info = context.get('model_info')
|
| 416 |
+
if not model_info:
|
| 417 |
+
return None
|
| 418 |
+
|
| 419 |
+
# Field mapping for API extraction
|
| 420 |
+
api_mappings = {
|
| 421 |
+
'author': lambda info: getattr(info, 'author', None) or context['model_id'].split('/')[0],
|
| 422 |
+
'name': lambda info: getattr(info, 'modelId', context['model_id']).split('/')[-1],
|
| 423 |
+
'tags': lambda info: getattr(info, 'tags', []),
|
| 424 |
+
'pipeline_tag': lambda info: getattr(info, 'pipeline_tag', None),
|
| 425 |
+
'downloads': lambda info: getattr(info, 'downloads', 0),
|
| 426 |
+
'commit': lambda info: getattr(info, 'sha', '') if getattr(info, 'sha', None) else None,
|
| 427 |
+
'suppliedBy': lambda info: getattr(info, 'author', None) or context['model_id'].split('/')[0],
|
| 428 |
+
'primaryPurpose': lambda info: getattr(info, 'pipeline_tag', 'text-generation'),
|
| 429 |
+
'downloadLocation': lambda info: f"https://huggingface.co/{context['model_id']}/tree/main",
|
| 430 |
+
'license': lambda info: getattr(info.card_data, 'license', None) if hasattr(info, 'card_data') and info.card_data else None,
|
| 431 |
+
'licenses': lambda info: getattr(info.card_data, 'license', None) if hasattr(info, 'card_data') and info.card_data else None,
|
| 432 |
+
'datasets': lambda info: getattr(info.card_data, 'datasets', []) if hasattr(info, 'card_data') and info.card_data else [],
|
| 433 |
+
'paper': self._extract_paper_link
|
| 434 |
+
}
|
| 435 |
+
|
| 436 |
+
if field_name in api_mappings:
|
| 437 |
+
try:
|
| 438 |
+
val = api_mappings[field_name](model_info)
|
| 439 |
+
# If valid value found, return it (filtering out "other")
|
| 440 |
+
if val:
|
| 441 |
+
# Special handling for lists (datasets, tags, paper) - don't lowercase/string convert immmediately
|
| 442 |
+
if field_name in ["datasets", "tags", "external_references", "paper"]:
|
| 443 |
+
return val
|
| 444 |
+
|
| 445 |
+
str_val = str(val).lower()
|
| 446 |
+
if isinstance(val, list) and len(val) > 0:
|
| 447 |
+
str_val = str(val[0]).lower()
|
| 448 |
+
|
| 449 |
+
# Enhanced filtering for "other" variants
|
| 450 |
+
ignored_values = {"other", "['other']", "other license", "other-license", "unknown"}
|
| 451 |
+
if str_val not in ignored_values:
|
| 452 |
+
return val
|
| 453 |
+
return None
|
| 454 |
+
except Exception as e:
|
| 455 |
+
logger.debug(f"API extraction failed for {field_name}: {e}")
|
| 456 |
+
return None
|
| 457 |
+
|
| 458 |
+
return None
|
| 459 |
+
|
| 460 |
+
def _try_model_card_extraction(self, field_name: str, context: Dict[str, Any]) -> Any:
|
| 461 |
+
"""Try to extract field from model card YAML frontmatter"""
|
| 462 |
+
model_card = context.get('model_card')
|
| 463 |
+
if not model_card or not hasattr(model_card, 'data') or not model_card.data:
|
| 464 |
+
return None
|
| 465 |
+
|
| 466 |
+
try:
|
| 467 |
+
card_data = model_card.data.to_dict() if hasattr(model_card.data, 'to_dict') else {}
|
| 468 |
+
|
| 469 |
+
# Field mapping for model card extraction
|
| 470 |
+
card_mappings = {
|
| 471 |
+
'license': 'license',
|
| 472 |
+
'language': 'language',
|
| 473 |
+
'library_name': 'library_name',
|
| 474 |
+
'base_model': 'base_model',
|
| 475 |
+
'datasets': 'datasets',
|
| 476 |
+
'description': ['model_summary', 'description'],
|
| 477 |
+
'typeOfModel': 'model_type',
|
| 478 |
+
'licenses': 'license' # Alternative mapping
|
| 479 |
+
}
|
| 480 |
+
|
| 481 |
+
if field_name in card_mappings:
|
| 482 |
+
mapping = card_mappings[field_name]
|
| 483 |
+
if isinstance(mapping, list):
|
| 484 |
+
# Try multiple keys
|
| 485 |
+
for key in mapping:
|
| 486 |
+
value = card_data.get(key)
|
| 487 |
+
if value:
|
| 488 |
+
return value
|
| 489 |
+
else:
|
| 490 |
+
val = card_data.get(mapping)
|
| 491 |
+
if val:
|
| 492 |
+
str_val = str(val).lower()
|
| 493 |
+
if isinstance(val, list) and len(val) > 0:
|
| 494 |
+
str_val = str(val[0]).lower()
|
| 495 |
+
|
| 496 |
+
ignored_values = {"other", "['other']", "other license", "other-license", "unknown"}
|
| 497 |
+
return val if str_val not in ignored_values else None
|
| 498 |
+
return None
|
| 499 |
+
|
| 500 |
+
# Direct field name lookup
|
| 501 |
+
val = card_data.get(field_name)
|
| 502 |
+
if val:
|
| 503 |
+
str_val = str(val).lower()
|
| 504 |
+
if isinstance(val, list) and len(val) > 0:
|
| 505 |
+
str_val = str(val[0]).lower()
|
| 506 |
+
return val if str_val != "other" else None
|
| 507 |
+
return None
|
| 508 |
+
|
| 509 |
+
except Exception as e:
|
| 510 |
+
logger.debug(f"Model card extraction failed for {field_name}: {e}")
|
| 511 |
+
return None
|
| 512 |
+
|
| 513 |
+
def _try_config_extraction(self, field_name: str, context: Dict[str, Any]) -> Any:
|
| 514 |
+
"""Try to extract field from configuration files"""
|
| 515 |
+
# Config file mappings
|
| 516 |
+
config_mappings = {
|
| 517 |
+
'model_type': ('config_data', 'model_type'),
|
| 518 |
+
'architectures': ('config_data', 'architectures'),
|
| 519 |
+
'vocab_size': ('config_data', 'vocab_size'),
|
| 520 |
+
'tokenizer_class': ('tokenizer_config', 'tokenizer_class'),
|
| 521 |
+
'typeOfModel': ('config_data', 'model_type')
|
| 522 |
+
}
|
| 523 |
+
|
| 524 |
+
if field_name in config_mappings:
|
| 525 |
+
config_type, config_key = config_mappings[field_name]
|
| 526 |
+
config_source = context.get(config_type)
|
| 527 |
+
if config_source:
|
| 528 |
+
return config_source.get(config_key)
|
| 529 |
+
|
| 530 |
+
return None
|
| 531 |
+
|
| 532 |
+
def _try_text_pattern_extraction(self, field_name: str, context: Dict[str, Any]) -> Any:
|
| 533 |
+
"""Try to extract field using text pattern matching"""
|
| 534 |
+
readme_content = context.get('readme_content')
|
| 535 |
+
if not readme_content:
|
| 536 |
+
return None
|
| 537 |
+
|
| 538 |
+
# Pattern mappings for different fields
|
| 539 |
+
pattern_mappings = {
|
| 540 |
+
'license': 'license',
|
| 541 |
+
'licenses': 'license', # Fix: Handle plural key
|
| 542 |
+
'datasets': 'datasets',
|
| 543 |
+
'energyConsumption': 'energy',
|
| 544 |
+
'technicalLimitations': 'limitations',
|
| 545 |
+
'safetyRiskAssessment': 'safety',
|
| 546 |
+
'model_type': 'model_type'
|
| 547 |
+
}
|
| 548 |
+
|
| 549 |
+
if field_name in pattern_mappings:
|
| 550 |
+
pattern_key = pattern_mappings[field_name]
|
| 551 |
+
if pattern_key in self.PATTERNS:
|
| 552 |
+
# Need to implement _find_pattern_matches which was missing in original snippet but used
|
| 553 |
+
matches = self._find_pattern_matches(readme_content, self.PATTERNS[pattern_key])
|
| 554 |
+
if matches:
|
| 555 |
+
# Prefer longest match for critical fields where "the" or short noise might appear
|
| 556 |
+
if field_name in ['license', 'licenses']:
|
| 557 |
+
return max(matches, key=len)
|
| 558 |
+
# Prefer string for critical fields
|
| 559 |
+
if field_name in ['model_type']:
|
| 560 |
+
return matches[0]
|
| 561 |
+
return matches[0] if len(matches) == 1 else matches
|
| 562 |
+
|
| 563 |
+
return None
|
| 564 |
+
|
| 565 |
+
def _find_pattern_matches(self, content: str, patterns: List[re.Pattern]) -> List[str]:
|
| 566 |
+
"""Find matches for a list of patterns in content"""
|
| 567 |
+
matches = []
|
| 568 |
+
for pattern in patterns:
|
| 569 |
+
match = pattern.search(content)
|
| 570 |
+
if match:
|
| 571 |
+
# Replace newlines/tabs with single space
|
| 572 |
+
val = re.sub(r'\s+', ' ', match.group(1)).strip()
|
| 573 |
+
# Filtering: 'the' is never a license, and generic "other" values
|
| 574 |
+
ignored_values = {
|
| 575 |
+
"the", "other", "other license", "other-license", "unknown",
|
| 576 |
+
"vision", "text", "audio", "image", "video", "data", "dataset", "datasets",
|
| 577 |
+
"training", "eval", "evaluation"
|
| 578 |
+
}
|
| 579 |
+
if val.lower() in ignored_values:
|
| 580 |
+
continue
|
| 581 |
+
matches.append(val)
|
| 582 |
+
return list(set(matches)) # Return unique matches
|
| 583 |
+
|
| 584 |
+
def _try_intelligent_inference(self, field_name: str, context: Dict[str, Any]) -> Any:
|
| 585 |
+
"""Try to infer field value from other available data"""
|
| 586 |
+
model_id = context['model_id']
|
| 587 |
+
|
| 588 |
+
# Intelligent inference rules
|
| 589 |
+
inference_rules = {
|
| 590 |
+
'author': lambda: model_id.split('/')[0] if '/' in model_id else 'unknown',
|
| 591 |
+
'suppliedBy': lambda: model_id.split('/')[0] if '/' in model_id else 'unknown',
|
| 592 |
+
'name': lambda: model_id.split('/')[-1],
|
| 593 |
+
'primaryPurpose': lambda: 'text-generation', # Default for most HF models
|
| 594 |
+
'typeOfModel': lambda: 'transformer', # Default for most HF models
|
| 595 |
+
'downloadLocation': lambda: f"https://huggingface.co/{model_id}/tree/main",
|
| 596 |
+
'bomFormat': lambda: 'CycloneDX',
|
| 597 |
+
'specVersion': lambda: '1.6',
|
| 598 |
+
'serialNumber': lambda: f"urn:uuid:{model_id.replace('/', '-')}",
|
| 599 |
+
'version': lambda: '1.0.0'
|
| 600 |
+
}
|
| 601 |
+
|
| 602 |
+
if field_name in inference_rules:
|
| 603 |
+
try:
|
| 604 |
+
return inference_rules[field_name]()
|
| 605 |
+
except Exception as e:
|
| 606 |
+
logger.debug(f"Intelligent inference failed for {field_name}: {e}")
|
| 607 |
+
return None
|
| 608 |
+
|
| 609 |
+
return None
|
| 610 |
+
|
| 611 |
+
def _try_fallback_value(self, field_name: str, field_config: Dict[str, Any]) -> Any:
|
| 612 |
+
"""Try to get fallback value from field configuration"""
|
| 613 |
+
# Check if field config has fallback value
|
| 614 |
+
if isinstance(field_config, dict):
|
| 615 |
+
fallback = field_config.get('fallback_value')
|
| 616 |
+
if fallback:
|
| 617 |
+
return fallback
|
| 618 |
+
|
| 619 |
+
# Standard fallback values for common fields
|
| 620 |
+
standard_fallbacks = {
|
| 621 |
+
'license': 'NOASSERTION',
|
| 622 |
+
'description': 'No description available',
|
| 623 |
+
'version': '1.0.0',
|
| 624 |
+
'bomFormat': 'CycloneDX',
|
| 625 |
+
'specVersion': '1.6'
|
| 626 |
+
}
|
| 627 |
+
|
| 628 |
+
return standard_fallbacks.get(field_name)
|
| 629 |
+
|
| 630 |
+
def _legacy_extraction(self, model_id: str, model_info: Dict[str, Any], model_card: Optional[ModelCard]) -> Dict[str, Any]:
|
| 631 |
+
"""
|
| 632 |
+
Fallback to legacy extraction when registry is not available.
|
| 633 |
+
This maintains backward compatibility.
|
| 634 |
+
"""
|
| 635 |
+
logger.info("🔄 Executing legacy extraction mode")
|
| 636 |
+
metadata = {}
|
| 637 |
+
|
| 638 |
+
# Execute legacy extraction layers
|
| 639 |
+
metadata.update(self._layer1_structured_api(model_id, model_info, model_card))
|
| 640 |
+
metadata.update(self._layer2_repository_files(model_id))
|
| 641 |
+
metadata.update(self._layer3_stp_extraction(model_card, model_id))
|
| 642 |
+
metadata.update(self._layer4_external_references(model_id, metadata))
|
| 643 |
+
metadata.update(self._layer5_intelligent_defaults(model_id, metadata))
|
| 644 |
+
|
| 645 |
+
return metadata
|
| 646 |
+
|
| 647 |
+
def _generate_external_references(self, model_id: str, metadata: Dict[str, Any]) -> Dict[str, Any]:
|
| 648 |
+
"""Generate external references for the model"""
|
| 649 |
+
external_refs = []
|
| 650 |
+
|
| 651 |
+
# Model repository
|
| 652 |
+
repo_url = f"https://huggingface.co/{model_id}"
|
| 653 |
+
external_refs.append({
|
| 654 |
+
"type": "website",
|
| 655 |
+
"url": repo_url,
|
| 656 |
+
"comment": "Model repository"
|
| 657 |
+
})
|
| 658 |
+
|
| 659 |
+
# Model files
|
| 660 |
+
files_url = f"https://huggingface.co/{model_id}/tree/main"
|
| 661 |
+
external_refs.append({
|
| 662 |
+
"type": "distribution",
|
| 663 |
+
"url": files_url,
|
| 664 |
+
"comment": "Model files"
|
| 665 |
+
})
|
| 666 |
+
|
| 667 |
+
# Commit URL if available
|
| 668 |
+
if 'commit' in metadata:
|
| 669 |
+
commit_url = f"https://huggingface.co/{model_id}/commit/{metadata['commit']}"
|
| 670 |
+
external_refs.append({
|
| 671 |
+
"type": "vcs",
|
| 672 |
+
"url": commit_url,
|
| 673 |
+
"comment": "Specific commit"
|
| 674 |
+
})
|
| 675 |
+
|
| 676 |
+
# Dataset references
|
| 677 |
+
if 'datasets' in metadata:
|
| 678 |
+
datasets = metadata['datasets']
|
| 679 |
+
if isinstance(datasets, list):
|
| 680 |
+
for dataset in datasets:
|
| 681 |
+
if isinstance(dataset, str):
|
| 682 |
+
dataset_url = f"https://huggingface.co/datasets/{dataset}"
|
| 683 |
+
external_refs.append({
|
| 684 |
+
"type": "distribution",
|
| 685 |
+
"url": dataset_url,
|
| 686 |
+
"comment": f"Training dataset: {dataset}"
|
| 687 |
+
})
|
| 688 |
+
|
| 689 |
+
# In current structure, we don't store into self.extraction_results here as a side effect if we can avoid it.
|
| 690 |
+
# But for tracing, we might want to.
|
| 691 |
+
|
| 692 |
+
return {'external_references': external_refs}
|
| 693 |
+
|
| 694 |
+
# Legacy methods for backward compatibility
|
| 695 |
+
def _layer1_structured_api(self, model_id: str, model_info: Dict[str, Any], model_card: Optional[ModelCard]) -> Dict[str, Any]:
|
| 696 |
+
"""Legacy Layer 1: Enhanced structured data extraction from HF API and model card."""
|
| 697 |
+
metadata = {}
|
| 698 |
+
# Enhanced model info extraction
|
| 699 |
+
if model_info:
|
| 700 |
+
try:
|
| 701 |
+
author = getattr(model_info, "author", None)
|
| 702 |
+
if not author or author.strip() == "":
|
| 703 |
+
parts = model_id.split("/")
|
| 704 |
+
author = parts[0] if len(parts) > 1 else "unknown"
|
| 705 |
+
|
| 706 |
+
metadata['author'] = author
|
| 707 |
+
metadata['name'] = getattr(model_info, "modelId", model_id).split("/")[-1]
|
| 708 |
+
metadata['tags'] = getattr(model_info, "tags", [])
|
| 709 |
+
metadata['pipeline_tag'] = getattr(model_info, "pipeline_tag", None)
|
| 710 |
+
metadata['downloads'] = getattr(model_info, "downloads", 0)
|
| 711 |
+
|
| 712 |
+
commit_sha = getattr(model_info, "sha", None)
|
| 713 |
+
if commit_sha:
|
| 714 |
+
metadata['commit'] = commit_sha
|
| 715 |
+
except Exception:
|
| 716 |
+
pass
|
| 717 |
+
|
| 718 |
+
if model_card and hasattr(model_card, "data") and model_card.data:
|
| 719 |
+
try:
|
| 720 |
+
card_data = model_card.data.to_dict() if hasattr(model_card.data, "to_dict") else {}
|
| 721 |
+
metadata['license'] = card_data.get("license")
|
| 722 |
+
metadata['language'] = card_data.get("language")
|
| 723 |
+
metadata['library_name'] = card_data.get("library_name")
|
| 724 |
+
metadata['base_model'] = card_data.get("base_model")
|
| 725 |
+
metadata['datasets'] = card_data.get("datasets")
|
| 726 |
+
metadata['description'] = card_data.get("model_summary") or card_data.get("description")
|
| 727 |
+
except Exception:
|
| 728 |
+
pass
|
| 729 |
+
|
| 730 |
+
metadata["primaryPurpose"] = metadata.get("pipeline_tag", "text-generation")
|
| 731 |
+
metadata["suppliedBy"] = metadata.get("author", "unknown")
|
| 732 |
+
metadata["typeOfModel"] = "transformer"
|
| 733 |
+
return metadata
|
| 734 |
+
|
| 735 |
+
def _layer2_repository_files(self, model_id: str) -> Dict[str, Any]:
|
| 736 |
+
"""Legacy Layer 2: Repository file analysis"""
|
| 737 |
+
metadata = {}
|
| 738 |
+
try:
|
| 739 |
+
config_data = self._download_and_parse_config(model_id, "config.json")
|
| 740 |
+
if config_data:
|
| 741 |
+
metadata['model_type'] = config_data.get("model_type")
|
| 742 |
+
metadata['architectures'] = config_data.get("architectures", [])
|
| 743 |
+
metadata['vocab_size'] = config_data.get("vocab_size")
|
| 744 |
+
|
| 745 |
+
tokenizer_config = self._download_and_parse_config(model_id, "tokenizer_config.json")
|
| 746 |
+
if tokenizer_config:
|
| 747 |
+
metadata['tokenizer_class'] = tokenizer_config.get("tokenizer_class")
|
| 748 |
+
|
| 749 |
+
if "license" not in metadata or not metadata["license"]:
|
| 750 |
+
detected_license = self._detect_license_from_file(model_id)
|
| 751 |
+
if detected_license:
|
| 752 |
+
metadata["license"] = detected_license
|
| 753 |
+
except Exception:
|
| 754 |
+
pass
|
| 755 |
+
return metadata
|
| 756 |
+
|
| 757 |
+
def _layer3_stp_extraction(self, model_card: Optional[ModelCard], model_id: str) -> Dict[str, Any]:
|
| 758 |
+
"""Legacy Layer 3: Smart Text Parsing"""
|
| 759 |
+
metadata = {}
|
| 760 |
+
try:
|
| 761 |
+
readme_content = self._get_readme_content(model_card, model_id)
|
| 762 |
+
if readme_content:
|
| 763 |
+
extracted_info = self._extract_from_text(readme_content)
|
| 764 |
+
metadata.update(extracted_info)
|
| 765 |
+
|
| 766 |
+
license_from_text = extracted_info.get("license_from_text")
|
| 767 |
+
if license_from_text and not metadata.get("license"):
|
| 768 |
+
if isinstance(license_from_text, list):
|
| 769 |
+
metadata["license"] = license_from_text[0]
|
| 770 |
+
else:
|
| 771 |
+
metadata["license"] = license_from_text
|
| 772 |
+
except Exception:
|
| 773 |
+
pass
|
| 774 |
+
return metadata
|
| 775 |
+
|
| 776 |
+
def _layer4_external_references(self, model_id: str, metadata: Dict[str, Any]) -> Dict[str, Any]:
|
| 777 |
+
"""Legacy Layer 4: External reference generation"""
|
| 778 |
+
return self._generate_external_references(model_id, metadata)
|
| 779 |
+
|
| 780 |
+
def _layer5_intelligent_defaults(self, model_id: str, metadata: Dict[str, Any]) -> Dict[str, Any]:
|
| 781 |
+
"""Legacy Layer 5: Intelligent default generation"""
|
| 782 |
+
if 'author' not in metadata or not metadata['author']:
|
| 783 |
+
parts = model_id.split("/")
|
| 784 |
+
metadata['author'] = parts[0] if len(parts) > 1 else "unknown"
|
| 785 |
+
if 'license' not in metadata or not metadata['license']:
|
| 786 |
+
metadata['license'] = "NOASSERTION"
|
| 787 |
+
return metadata
|
| 788 |
+
|
| 789 |
+
def _fetch_with_backoff(self, fetch_func, *args, max_retries=3, initial_backoff=1.0, **kwargs):
|
| 790 |
+
import time
|
| 791 |
+
for attempt in range(max_retries):
|
| 792 |
+
try:
|
| 793 |
+
return fetch_func(*args, **kwargs)
|
| 794 |
+
except Exception as e:
|
| 795 |
+
error_msg = str(e)
|
| 796 |
+
if "401" in error_msg or "404" in error_msg: # Auth or not found don't retry
|
| 797 |
+
raise e
|
| 798 |
+
if attempt == max_retries - 1:
|
| 799 |
+
raise e
|
| 800 |
+
time.sleep(initial_backoff * (2 ** attempt))
|
| 801 |
+
|
| 802 |
+
def _download_and_parse_config(self, model_id: str, filename: str) -> Optional[Dict[str, Any]]:
|
| 803 |
+
"""Download and parse a JSON config file from the model repository"""
|
| 804 |
+
import json
|
| 805 |
+
try:
|
| 806 |
+
file_path = self._fetch_with_backoff(hf_hub_download, repo_id=model_id, filename=filename)
|
| 807 |
+
with open(file_path, 'r') as f:
|
| 808 |
+
return json.load(f)
|
| 809 |
+
except (RepositoryNotFoundError, EntryNotFoundError, json.JSONDecodeError):
|
| 810 |
+
return None
|
| 811 |
+
except Exception:
|
| 812 |
+
return None
|
| 813 |
+
|
| 814 |
+
def _get_readme_content(self, model_card: Optional[ModelCard], model_id: str) -> Optional[str]:
|
| 815 |
+
"""Get README content from model card or by downloading"""
|
| 816 |
+
try:
|
| 817 |
+
if model_card and hasattr(model_card, 'content'):
|
| 818 |
+
return model_card.content
|
| 819 |
+
readme_path = self._fetch_with_backoff(hf_hub_download, repo_id=model_id, filename="README.md")
|
| 820 |
+
with open(readme_path, 'r', encoding='utf-8') as f:
|
| 821 |
+
return f.read()
|
| 822 |
+
except Exception:
|
| 823 |
+
return None
|
| 824 |
+
|
| 825 |
+
def _extract_from_text(self, text: str) -> Dict[str, Any]:
|
| 826 |
+
"""Extract structured information from unstructured text (Legacy Helper)"""
|
| 827 |
+
# Minimal implementation for legacy support, utilizing the patterns we already have
|
| 828 |
+
metadata = {}
|
| 829 |
+
for category, patterns in self.PATTERNS.items():
|
| 830 |
+
matches = self._find_pattern_matches(text, patterns)
|
| 831 |
+
if matches:
|
| 832 |
+
metadata[category] = matches[0] if len(matches) == 1 else matches
|
| 833 |
+
return metadata
|
src/models/field_registry.json
ADDED
|
@@ -0,0 +1,1714 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"registry_metadata": {
|
| 3 |
+
"description": "Field registry for configurable AI SBOM generation and scoring"
|
| 4 |
+
},
|
| 5 |
+
"scoring_config": {
|
| 6 |
+
"tier_weights": {
|
| 7 |
+
"critical": 3,
|
| 8 |
+
"important": 2,
|
| 9 |
+
"supplementary": 1
|
| 10 |
+
},
|
| 11 |
+
"category_weights": {
|
| 12 |
+
"required_fields": 20,
|
| 13 |
+
"metadata": 20,
|
| 14 |
+
"component_basic": 20,
|
| 15 |
+
"component_model_card": 30,
|
| 16 |
+
"external_references": 10
|
| 17 |
+
},
|
| 18 |
+
"scoring_profiles": {
|
| 19 |
+
"basic": {
|
| 20 |
+
"description": "Minimal fields required for identification",
|
| 21 |
+
"required_categories": [
|
| 22 |
+
"required_fields",
|
| 23 |
+
"component_basic"
|
| 24 |
+
],
|
| 25 |
+
"required_fields": [
|
| 26 |
+
"bomFormat",
|
| 27 |
+
"specVersion",
|
| 28 |
+
"serialNumber",
|
| 29 |
+
"version",
|
| 30 |
+
"name"
|
| 31 |
+
],
|
| 32 |
+
"minimum_score": 40,
|
| 33 |
+
"weight_multiplier": 1.0
|
| 34 |
+
},
|
| 35 |
+
"standard": {
|
| 36 |
+
"description": "Comprehensive fields for proper documentation",
|
| 37 |
+
"required_categories": [
|
| 38 |
+
"required_fields",
|
| 39 |
+
"metadata",
|
| 40 |
+
"component_basic"
|
| 41 |
+
],
|
| 42 |
+
"required_fields": [
|
| 43 |
+
"bomFormat",
|
| 44 |
+
"specVersion",
|
| 45 |
+
"serialNumber",
|
| 46 |
+
"version",
|
| 47 |
+
"name",
|
| 48 |
+
"downloadLocation",
|
| 49 |
+
"primaryPurpose",
|
| 50 |
+
"suppliedBy"
|
| 51 |
+
],
|
| 52 |
+
"minimum_score": 70,
|
| 53 |
+
"weight_multiplier": 1.0
|
| 54 |
+
},
|
| 55 |
+
"advanced": {
|
| 56 |
+
"description": "Extensive documentation for maximum transparency",
|
| 57 |
+
"required_categories": [
|
| 58 |
+
"required_fields",
|
| 59 |
+
"metadata",
|
| 60 |
+
"component_basic",
|
| 61 |
+
"component_model_card",
|
| 62 |
+
"external_references"
|
| 63 |
+
],
|
| 64 |
+
"required_fields": [
|
| 65 |
+
"bomFormat",
|
| 66 |
+
"specVersion",
|
| 67 |
+
"serialNumber",
|
| 68 |
+
"version",
|
| 69 |
+
"name",
|
| 70 |
+
"downloadLocation",
|
| 71 |
+
"primaryPurpose",
|
| 72 |
+
"suppliedBy",
|
| 73 |
+
"type",
|
| 74 |
+
"purl",
|
| 75 |
+
"description",
|
| 76 |
+
"licenses",
|
| 77 |
+
"hyperparameter",
|
| 78 |
+
"technicalLimitations",
|
| 79 |
+
"energyConsumption",
|
| 80 |
+
"safetyRiskAssessment",
|
| 81 |
+
"typeOfModel"
|
| 82 |
+
],
|
| 83 |
+
"minimum_score": 85,
|
| 84 |
+
"weight_multiplier": 1.0
|
| 85 |
+
}
|
| 86 |
+
},
|
| 87 |
+
"algorithm_config": {
|
| 88 |
+
"type": "weighted_sum",
|
| 89 |
+
"max_score": 100,
|
| 90 |
+
"normalization": "category_based",
|
| 91 |
+
"penalty_for_missing_critical": 0.5,
|
| 92 |
+
"bonus_for_complete_categories": 0.1
|
| 93 |
+
}
|
| 94 |
+
},
|
| 95 |
+
"aibom_config": {
|
| 96 |
+
"structure_template": "cyclonedx_1.6",
|
| 97 |
+
"generator_info": {
|
| 98 |
+
"name": "owasp-aibom-generator",
|
| 99 |
+
"version": "1.0.0",
|
| 100 |
+
"manufacturer": "OWASP GenAI Security Project"
|
| 101 |
+
},
|
| 102 |
+
"generation_rules": {
|
| 103 |
+
"include_metadata_properties": true,
|
| 104 |
+
"include_model_card": true,
|
| 105 |
+
"include_external_references": true,
|
| 106 |
+
"include_dependencies": true
|
| 107 |
+
},
|
| 108 |
+
"validation_rules": {
|
| 109 |
+
"require_critical_fields": true,
|
| 110 |
+
"validate_jsonpath_expressions": true,
|
| 111 |
+
"enforce_cyclonedx_schema": true
|
| 112 |
+
}
|
| 113 |
+
},
|
| 114 |
+
"fields": {
|
| 115 |
+
"bomFormat": {
|
| 116 |
+
"tier": "critical",
|
| 117 |
+
"weight": 4.0,
|
| 118 |
+
"category": "required_fields",
|
| 119 |
+
"description": "Format identifier for the SBOM",
|
| 120 |
+
"jsonpath": "$.bomFormat",
|
| 121 |
+
"aibom_generation": {
|
| 122 |
+
"location": "$.bomFormat",
|
| 123 |
+
"rule": "always_include",
|
| 124 |
+
"source_fields": [
|
| 125 |
+
"bomFormat"
|
| 126 |
+
],
|
| 127 |
+
"validation": "required",
|
| 128 |
+
"data_type": "string"
|
| 129 |
+
},
|
| 130 |
+
"scoring": {
|
| 131 |
+
"points": 4.0,
|
| 132 |
+
"required_for_profiles": [
|
| 133 |
+
"basic",
|
| 134 |
+
"standard",
|
| 135 |
+
"advanced"
|
| 136 |
+
],
|
| 137 |
+
"category_contribution": 0.2
|
| 138 |
+
},
|
| 139 |
+
"validation_message": {
|
| 140 |
+
"missing": "Missing critical field: bomFormat - essential for SBOM identification",
|
| 141 |
+
"recommendation": "Ensure bomFormat is set to 'CycloneDX'"
|
| 142 |
+
},
|
| 143 |
+
"reference_urls": {
|
| 144 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#bomFormat",
|
| 145 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#bomFormat",
|
| 146 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/core/"
|
| 147 |
+
}
|
| 148 |
+
},
|
| 149 |
+
"datasets": {
|
| 150 |
+
"tier": "important",
|
| 151 |
+
"weight": 3.0,
|
| 152 |
+
"category": "component_model_card",
|
| 153 |
+
"description": "Datasets used for training",
|
| 154 |
+
"jsonpath": "$.component.modelCard.modelParameters.datasets",
|
| 155 |
+
"aibom_generation": {
|
| 156 |
+
"location": "$.component.modelCard.modelParameters.datasets",
|
| 157 |
+
"rule": "include_if_available",
|
| 158 |
+
"source_fields": [
|
| 159 |
+
"datasets",
|
| 160 |
+
"dataset",
|
| 161 |
+
"data"
|
| 162 |
+
],
|
| 163 |
+
"validation": "recommended",
|
| 164 |
+
"data_type": "array"
|
| 165 |
+
},
|
| 166 |
+
"scoring": {
|
| 167 |
+
"points": 3.0,
|
| 168 |
+
"required_for_profiles": [
|
| 169 |
+
"standard",
|
| 170 |
+
"advanced"
|
| 171 |
+
],
|
| 172 |
+
"category_contribution": 0.1
|
| 173 |
+
},
|
| 174 |
+
"validation_message": {
|
| 175 |
+
"missing": "Missing field: datasets - training data information important for transparency",
|
| 176 |
+
"recommendation": "Add information about the datasets used to train the model"
|
| 177 |
+
},
|
| 178 |
+
"reference_urls": {
|
| 179 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_modelParameters_datasets",
|
| 180 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_modelParameters_datasets",
|
| 181 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/dataset/"
|
| 182 |
+
}
|
| 183 |
+
},
|
| 184 |
+
"paper": {
|
| 185 |
+
"tier": "supplementary",
|
| 186 |
+
"weight": 2.0,
|
| 187 |
+
"category": "external_references",
|
| 188 |
+
"description": "Research paper associated with the model",
|
| 189 |
+
"jsonpath": "$.metadata.component.externalReferences[?(@.type=='documentation')]",
|
| 190 |
+
"aibom_generation": {
|
| 191 |
+
"location": "none",
|
| 192 |
+
"rule": "include_if_present",
|
| 193 |
+
"source_fields": [
|
| 194 |
+
"paper"
|
| 195 |
+
],
|
| 196 |
+
"validation": "optional",
|
| 197 |
+
"data_type": "string"
|
| 198 |
+
},
|
| 199 |
+
"extraction": {
|
| 200 |
+
"methods": [
|
| 201 |
+
"api"
|
| 202 |
+
],
|
| 203 |
+
"source_priority": [
|
| 204 |
+
"api"
|
| 205 |
+
]
|
| 206 |
+
},
|
| 207 |
+
"scoring": {
|
| 208 |
+
"points": 2.0,
|
| 209 |
+
"required_for_profiles": [
|
| 210 |
+
"advanced"
|
| 211 |
+
],
|
| 212 |
+
"category_contribution": 0.2
|
| 213 |
+
},
|
| 214 |
+
"validation_message": {
|
| 215 |
+
"missing": "No research paper link found",
|
| 216 |
+
"recommendation": "Add ArXiv tag or paper link to model card"
|
| 217 |
+
},
|
| 218 |
+
"reference_urls": {
|
| 219 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_externalReferences",
|
| 220 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_externalReferences",
|
| 221 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 222 |
+
}
|
| 223 |
+
},
|
| 224 |
+
"vcs": {
|
| 225 |
+
"tier": "supplementary",
|
| 226 |
+
"weight": 4.0,
|
| 227 |
+
"category": "external_references",
|
| 228 |
+
"description": "Version control system URL",
|
| 229 |
+
"jsonpath": "$.components[0].externalReferences[?(@.type=='vcs')].url",
|
| 230 |
+
"aibom_generation": {
|
| 231 |
+
"location": "none",
|
| 232 |
+
"rule": "include_if_present",
|
| 233 |
+
"source_fields": [
|
| 234 |
+
"vcs",
|
| 235 |
+
"repository"
|
| 236 |
+
],
|
| 237 |
+
"validation": "optional",
|
| 238 |
+
"data_type": "string"
|
| 239 |
+
},
|
| 240 |
+
"extraction": {
|
| 241 |
+
"methods": [
|
| 242 |
+
"api"
|
| 243 |
+
],
|
| 244 |
+
"source_priority": [
|
| 245 |
+
"api"
|
| 246 |
+
]
|
| 247 |
+
},
|
| 248 |
+
"scoring": {
|
| 249 |
+
"points": 4.0,
|
| 250 |
+
"required_for_profiles": [
|
| 251 |
+
"advanced"
|
| 252 |
+
],
|
| 253 |
+
"category_contribution": 0.4
|
| 254 |
+
},
|
| 255 |
+
"validation_message": {
|
| 256 |
+
"missing": "No VCS link found",
|
| 257 |
+
"recommendation": "Add repository link to model card"
|
| 258 |
+
},
|
| 259 |
+
"reference_urls": {
|
| 260 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_externalReferences",
|
| 261 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_externalReferences",
|
| 262 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 263 |
+
}
|
| 264 |
+
},
|
| 265 |
+
"website": {
|
| 266 |
+
"tier": "supplementary",
|
| 267 |
+
"weight": 4.0,
|
| 268 |
+
"category": "external_references",
|
| 269 |
+
"description": "Model website or documentation URL",
|
| 270 |
+
"jsonpath": "$.components[0].externalReferences[?(@.type=='website')].url",
|
| 271 |
+
"aibom_generation": {
|
| 272 |
+
"location": "none",
|
| 273 |
+
"rule": "include_if_present",
|
| 274 |
+
"source_fields": [
|
| 275 |
+
"website",
|
| 276 |
+
"url"
|
| 277 |
+
],
|
| 278 |
+
"validation": "optional",
|
| 279 |
+
"data_type": "string"
|
| 280 |
+
},
|
| 281 |
+
"extraction": {
|
| 282 |
+
"methods": [
|
| 283 |
+
"api"
|
| 284 |
+
],
|
| 285 |
+
"source_priority": [
|
| 286 |
+
"api"
|
| 287 |
+
]
|
| 288 |
+
},
|
| 289 |
+
"scoring": {
|
| 290 |
+
"points": 4.0,
|
| 291 |
+
"required_for_profiles": [
|
| 292 |
+
"advanced"
|
| 293 |
+
],
|
| 294 |
+
"category_contribution": 0.4
|
| 295 |
+
},
|
| 296 |
+
"validation_message": {
|
| 297 |
+
"missing": "No website link found",
|
| 298 |
+
"recommendation": "Add website link to model card"
|
| 299 |
+
},
|
| 300 |
+
"reference_urls": {
|
| 301 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_externalReferences",
|
| 302 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_externalReferences",
|
| 303 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 304 |
+
}
|
| 305 |
+
},
|
| 306 |
+
"specVersion": {
|
| 307 |
+
"tier": "critical",
|
| 308 |
+
"weight": 4.0,
|
| 309 |
+
"category": "required_fields",
|
| 310 |
+
"description": "CycloneDX specification version",
|
| 311 |
+
"jsonpath": "$.specVersion",
|
| 312 |
+
"aibom_generation": {
|
| 313 |
+
"location": "$.specVersion",
|
| 314 |
+
"rule": "always_include",
|
| 315 |
+
"source_fields": [
|
| 316 |
+
"specVersion"
|
| 317 |
+
],
|
| 318 |
+
"validation": "required",
|
| 319 |
+
"data_type": "string"
|
| 320 |
+
},
|
| 321 |
+
"scoring": {
|
| 322 |
+
"points": 4.0,
|
| 323 |
+
"required_for_profiles": [
|
| 324 |
+
"basic",
|
| 325 |
+
"standard",
|
| 326 |
+
"advanced"
|
| 327 |
+
],
|
| 328 |
+
"category_contribution": 0.2
|
| 329 |
+
},
|
| 330 |
+
"validation_message": {
|
| 331 |
+
"missing": "Missing critical field: specVersion - required for CycloneDX compliance",
|
| 332 |
+
"recommendation": "Set specVersion to '1.6' for CycloneDX 1.6 compliance"
|
| 333 |
+
},
|
| 334 |
+
"reference_urls": {
|
| 335 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#specVersion",
|
| 336 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#specVersion",
|
| 337 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/core/"
|
| 338 |
+
}
|
| 339 |
+
},
|
| 340 |
+
"serialNumber": {
|
| 341 |
+
"tier": "critical",
|
| 342 |
+
"weight": 4.0,
|
| 343 |
+
"category": "required_fields",
|
| 344 |
+
"description": "Unique identifier for this SBOM instance",
|
| 345 |
+
"jsonpath": "$.serialNumber",
|
| 346 |
+
"aibom_generation": {
|
| 347 |
+
"location": "$.serialNumber",
|
| 348 |
+
"rule": "always_include",
|
| 349 |
+
"source_fields": [
|
| 350 |
+
"serialNumber"
|
| 351 |
+
],
|
| 352 |
+
"validation": "required",
|
| 353 |
+
"data_type": "string"
|
| 354 |
+
},
|
| 355 |
+
"scoring": {
|
| 356 |
+
"points": 4.0,
|
| 357 |
+
"required_for_profiles": [
|
| 358 |
+
"basic",
|
| 359 |
+
"standard",
|
| 360 |
+
"advanced"
|
| 361 |
+
],
|
| 362 |
+
"category_contribution": 0.2
|
| 363 |
+
},
|
| 364 |
+
"validation_message": {
|
| 365 |
+
"missing": "Missing critical field: serialNumber - unique identifier required",
|
| 366 |
+
"recommendation": "Generate a UUID for the SBOM instance"
|
| 367 |
+
},
|
| 368 |
+
"reference_urls": {
|
| 369 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#serialNumber",
|
| 370 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#serialNumber",
|
| 371 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/core/"
|
| 372 |
+
}
|
| 373 |
+
},
|
| 374 |
+
"version": {
|
| 375 |
+
"tier": "critical",
|
| 376 |
+
"weight": 4.0,
|
| 377 |
+
"category": "required_fields",
|
| 378 |
+
"description": "Version of this SBOM document",
|
| 379 |
+
"jsonpath": "$.version",
|
| 380 |
+
"aibom_generation": {
|
| 381 |
+
"location": "$.version",
|
| 382 |
+
"rule": "always_include",
|
| 383 |
+
"source_fields": [
|
| 384 |
+
"version"
|
| 385 |
+
],
|
| 386 |
+
"validation": "required",
|
| 387 |
+
"data_type": "integer"
|
| 388 |
+
},
|
| 389 |
+
"scoring": {
|
| 390 |
+
"points": 4.0,
|
| 391 |
+
"required_for_profiles": [
|
| 392 |
+
"basic",
|
| 393 |
+
"standard",
|
| 394 |
+
"advanced"
|
| 395 |
+
],
|
| 396 |
+
"category_contribution": 0.2
|
| 397 |
+
},
|
| 398 |
+
"validation_message": {
|
| 399 |
+
"missing": "Missing critical field: version - document version required",
|
| 400 |
+
"recommendation": "Set version to 1 for initial SBOM generation"
|
| 401 |
+
},
|
| 402 |
+
"reference_urls": {
|
| 403 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#version",
|
| 404 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#version",
|
| 405 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/core/"
|
| 406 |
+
}
|
| 407 |
+
},
|
| 408 |
+
"primaryPurpose": {
|
| 409 |
+
"tier": "critical",
|
| 410 |
+
"weight": 4.0,
|
| 411 |
+
"category": "metadata",
|
| 412 |
+
"description": "Primary purpose or task of the AI model",
|
| 413 |
+
"jsonpath": "$.component.modelCard.modelParameters.task",
|
| 414 |
+
"aibom_generation": {
|
| 415 |
+
"location": "$.component.modelCard.modelParameters.task",
|
| 416 |
+
"rule": "include_if_available",
|
| 417 |
+
"source_fields": [
|
| 418 |
+
"primaryPurpose",
|
| 419 |
+
"pipeline_tag",
|
| 420 |
+
"ai:task"
|
| 421 |
+
],
|
| 422 |
+
"validation": "recommended",
|
| 423 |
+
"data_type": "string"
|
| 424 |
+
},
|
| 425 |
+
"scoring": {
|
| 426 |
+
"points": 4.0,
|
| 427 |
+
"required_for_profiles": [
|
| 428 |
+
"standard",
|
| 429 |
+
"advanced"
|
| 430 |
+
],
|
| 431 |
+
"category_contribution": 0.2
|
| 432 |
+
},
|
| 433 |
+
"validation_message": {
|
| 434 |
+
"missing": "Missing critical field: primaryPurpose - essential for understanding model intent",
|
| 435 |
+
"recommendation": "Add the primary task or purpose of the AI model"
|
| 436 |
+
},
|
| 437 |
+
"reference_urls": {
|
| 438 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_modelParameters_approach",
|
| 439 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_modelParameters_approach",
|
| 440 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 441 |
+
}
|
| 442 |
+
},
|
| 443 |
+
"suppliedBy": {
|
| 444 |
+
"tier": "critical",
|
| 445 |
+
"weight": 4.0,
|
| 446 |
+
"category": "metadata",
|
| 447 |
+
"description": "Organization or individual that supplied the model",
|
| 448 |
+
"jsonpath": "$.component.supplier.name",
|
| 449 |
+
"aibom_generation": {
|
| 450 |
+
"location": "$.component.supplier",
|
| 451 |
+
"rule": "include_if_available",
|
| 452 |
+
"source_fields": [
|
| 453 |
+
"suppliedBy",
|
| 454 |
+
"author",
|
| 455 |
+
"publisher"
|
| 456 |
+
],
|
| 457 |
+
"validation": "recommended",
|
| 458 |
+
"data_type": "string"
|
| 459 |
+
},
|
| 460 |
+
"scoring": {
|
| 461 |
+
"points": 4.0,
|
| 462 |
+
"required_for_profiles": [
|
| 463 |
+
"standard",
|
| 464 |
+
"advanced"
|
| 465 |
+
],
|
| 466 |
+
"category_contribution": 0.2
|
| 467 |
+
},
|
| 468 |
+
"validation_message": {
|
| 469 |
+
"missing": "Missing critical field: suppliedBy - supplier identification required",
|
| 470 |
+
"recommendation": "Add the organization or individual who provided the model"
|
| 471 |
+
},
|
| 472 |
+
"reference_urls": {
|
| 473 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_supplier",
|
| 474 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_supplier",
|
| 475 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/software/"
|
| 476 |
+
}
|
| 477 |
+
},
|
| 478 |
+
"standardCompliance": {
|
| 479 |
+
"tier": "supplementary",
|
| 480 |
+
"weight": 1.0,
|
| 481 |
+
"category": "metadata",
|
| 482 |
+
"description": "Standards or regulations the model complies with",
|
| 483 |
+
"jsonpath": "$.metadata.properties[?(@.name=='standardCompliance')].value",
|
| 484 |
+
"aibom_generation": {
|
| 485 |
+
"location": "$.metadata.properties",
|
| 486 |
+
"rule": "include_if_available",
|
| 487 |
+
"source_fields": [
|
| 488 |
+
"standardCompliance",
|
| 489 |
+
"compliance"
|
| 490 |
+
],
|
| 491 |
+
"validation": "optional",
|
| 492 |
+
"data_type": "string"
|
| 493 |
+
},
|
| 494 |
+
"scoring": {
|
| 495 |
+
"points": 1.0,
|
| 496 |
+
"required_for_profiles": [
|
| 497 |
+
"advanced"
|
| 498 |
+
],
|
| 499 |
+
"category_contribution": 0.05
|
| 500 |
+
},
|
| 501 |
+
"validation_message": {
|
| 502 |
+
"missing": "Missing supplementary field: standardCompliance - compliance information helpful",
|
| 503 |
+
"recommendation": "Add any relevant standards or regulations the model complies with"
|
| 504 |
+
},
|
| 505 |
+
"reference_urls": {
|
| 506 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/model/AI/Classes/AIPackage/#AI-standardCompliance",
|
| 507 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 508 |
+
}
|
| 509 |
+
},
|
| 510 |
+
"external_references": {
|
| 511 |
+
"tier": "supplementary",
|
| 512 |
+
"weight": 1.0,
|
| 513 |
+
"category": "component_basic",
|
| 514 |
+
"description": "Additional external references",
|
| 515 |
+
"jsonpath": "$.component.externalReferences",
|
| 516 |
+
"aibom_generation": {
|
| 517 |
+
"location": "$.component.externalReferences",
|
| 518 |
+
"rule": "include_if_available",
|
| 519 |
+
"source_fields": [
|
| 520 |
+
"external_references",
|
| 521 |
+
"references",
|
| 522 |
+
"citations"
|
| 523 |
+
],
|
| 524 |
+
"validation": "optional",
|
| 525 |
+
"data_type": "array"
|
| 526 |
+
},
|
| 527 |
+
"scoring": {
|
| 528 |
+
"points": 1.0,
|
| 529 |
+
"required_for_profiles": [
|
| 530 |
+
"advanced"
|
| 531 |
+
],
|
| 532 |
+
"category_contribution": 0.05
|
| 533 |
+
},
|
| 534 |
+
"validation_message": {
|
| 535 |
+
"missing": "Missing supplementary field: external_references - additional references helpful",
|
| 536 |
+
"recommendation": "Add links to papers, documentation, or other resources"
|
| 537 |
+
},
|
| 538 |
+
"reference_urls": {
|
| 539 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_externalReferences",
|
| 540 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_externalReferences",
|
| 541 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 542 |
+
}
|
| 543 |
+
},
|
| 544 |
+
"domain": {
|
| 545 |
+
"tier": "supplementary",
|
| 546 |
+
"weight": 1.0,
|
| 547 |
+
"category": "metadata",
|
| 548 |
+
"description": "Domain or field of application",
|
| 549 |
+
"jsonpath": "$.metadata.properties[?(@.name=='domain')].value",
|
| 550 |
+
"aibom_generation": {
|
| 551 |
+
"location": "$.metadata.properties",
|
| 552 |
+
"rule": "include_if_available",
|
| 553 |
+
"source_fields": [
|
| 554 |
+
"domain",
|
| 555 |
+
"field",
|
| 556 |
+
"application_area"
|
| 557 |
+
],
|
| 558 |
+
"validation": "optional",
|
| 559 |
+
"data_type": "string"
|
| 560 |
+
},
|
| 561 |
+
"scoring": {
|
| 562 |
+
"points": 1.0,
|
| 563 |
+
"required_for_profiles": [
|
| 564 |
+
"advanced"
|
| 565 |
+
],
|
| 566 |
+
"category_contribution": 0.05
|
| 567 |
+
},
|
| 568 |
+
"validation_message": {
|
| 569 |
+
"missing": "Missing supplementary field: domain - application domain helpful for context",
|
| 570 |
+
"recommendation": "Add the domain or field where this model is typically applied"
|
| 571 |
+
},
|
| 572 |
+
"reference_urls": {
|
| 573 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/model/AI/Classes/AIPackage/#AI-domain",
|
| 574 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 575 |
+
}
|
| 576 |
+
},
|
| 577 |
+
"autonomyType": {
|
| 578 |
+
"tier": "supplementary",
|
| 579 |
+
"weight": 1.0,
|
| 580 |
+
"category": "metadata",
|
| 581 |
+
"description": "Level of autonomy or human involvement required",
|
| 582 |
+
"jsonpath": "$.metadata.properties[?(@.name=='autonomyType')].value",
|
| 583 |
+
"aibom_generation": {
|
| 584 |
+
"location": "$.metadata.properties",
|
| 585 |
+
"rule": "include_if_available",
|
| 586 |
+
"source_fields": [
|
| 587 |
+
"autonomyType",
|
| 588 |
+
"autonomy_level"
|
| 589 |
+
],
|
| 590 |
+
"validation": "optional",
|
| 591 |
+
"data_type": "string"
|
| 592 |
+
},
|
| 593 |
+
"scoring": {
|
| 594 |
+
"points": 1.0,
|
| 595 |
+
"required_for_profiles": [
|
| 596 |
+
"advanced"
|
| 597 |
+
],
|
| 598 |
+
"category_contribution": 0.05
|
| 599 |
+
},
|
| 600 |
+
"validation_message": {
|
| 601 |
+
"missing": "Missing supplementary field: autonomyType - autonomy level information helpful",
|
| 602 |
+
"recommendation": "Add information about the level of human oversight required"
|
| 603 |
+
},
|
| 604 |
+
"reference_urls": {
|
| 605 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/model/AI/Classes/AIPackage/#AI-autonomyType",
|
| 606 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 607 |
+
}
|
| 608 |
+
},
|
| 609 |
+
"name": {
|
| 610 |
+
"tier": "critical",
|
| 611 |
+
"weight": 4.0,
|
| 612 |
+
"category": "component_basic",
|
| 613 |
+
"description": "Name of the AI model component",
|
| 614 |
+
"jsonpath": "$.components[0].name",
|
| 615 |
+
"aibom_generation": {
|
| 616 |
+
"location": "$.components[0].name",
|
| 617 |
+
"rule": "always_include",
|
| 618 |
+
"source_fields": [
|
| 619 |
+
"name",
|
| 620 |
+
"model_name"
|
| 621 |
+
],
|
| 622 |
+
"validation": "required",
|
| 623 |
+
"data_type": "string"
|
| 624 |
+
},
|
| 625 |
+
"scoring": {
|
| 626 |
+
"points": 4.0,
|
| 627 |
+
"required_for_profiles": [
|
| 628 |
+
"basic",
|
| 629 |
+
"standard",
|
| 630 |
+
"advanced"
|
| 631 |
+
],
|
| 632 |
+
"category_contribution": 0.2
|
| 633 |
+
},
|
| 634 |
+
"validation_message": {
|
| 635 |
+
"missing": "Missing critical field: name - essential for model identification",
|
| 636 |
+
"recommendation": "Add a descriptive name for the model"
|
| 637 |
+
},
|
| 638 |
+
"reference_urls": {
|
| 639 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_name",
|
| 640 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_name",
|
| 641 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/software/"
|
| 642 |
+
}
|
| 643 |
+
},
|
| 644 |
+
"type": {
|
| 645 |
+
"tier": "critical",
|
| 646 |
+
"weight": 4.0,
|
| 647 |
+
"category": "component_basic",
|
| 648 |
+
"description": "Type of component (machine-learning-model)",
|
| 649 |
+
"jsonpath": "$.components[0].type",
|
| 650 |
+
"aibom_generation": {
|
| 651 |
+
"location": "$.components[0].type",
|
| 652 |
+
"rule": "always_include",
|
| 653 |
+
"source_fields": [
|
| 654 |
+
"type"
|
| 655 |
+
],
|
| 656 |
+
"validation": "required",
|
| 657 |
+
"data_type": "string"
|
| 658 |
+
},
|
| 659 |
+
"scoring": {
|
| 660 |
+
"points": 4.0,
|
| 661 |
+
"required_for_profiles": [
|
| 662 |
+
"basic",
|
| 663 |
+
"standard",
|
| 664 |
+
"advanced"
|
| 665 |
+
],
|
| 666 |
+
"category_contribution": 0.2
|
| 667 |
+
},
|
| 668 |
+
"validation_message": {
|
| 669 |
+
"missing": "Missing field: type - component type classification needed",
|
| 670 |
+
"recommendation": "Set type to 'machine-learning-model' for AI models"
|
| 671 |
+
},
|
| 672 |
+
"reference_urls": {
|
| 673 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_type",
|
| 674 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_type",
|
| 675 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/software/"
|
| 676 |
+
}
|
| 677 |
+
},
|
| 678 |
+
"component_version": {
|
| 679 |
+
"tier": "critical",
|
| 680 |
+
"weight": 4.0,
|
| 681 |
+
"category": "component_basic",
|
| 682 |
+
"description": "Version of the component",
|
| 683 |
+
"jsonpath": "$.components[0].version",
|
| 684 |
+
"aibom_generation": {
|
| 685 |
+
"location": "$.components[0].version",
|
| 686 |
+
"rule": "always_include",
|
| 687 |
+
"source_fields": [
|
| 688 |
+
"version"
|
| 689 |
+
],
|
| 690 |
+
"validation": "required",
|
| 691 |
+
"data_type": "string"
|
| 692 |
+
},
|
| 693 |
+
"scoring": {
|
| 694 |
+
"points": 4.0,
|
| 695 |
+
"required_for_profiles": [
|
| 696 |
+
"basic",
|
| 697 |
+
"standard",
|
| 698 |
+
"advanced"
|
| 699 |
+
],
|
| 700 |
+
"category_contribution": 0.2
|
| 701 |
+
},
|
| 702 |
+
"validation_message": {
|
| 703 |
+
"missing": "Missing field: version - component version needed",
|
| 704 |
+
"recommendation": "Set an appropriate version for the component"
|
| 705 |
+
},
|
| 706 |
+
"reference_urls": {
|
| 707 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_version",
|
| 708 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_version",
|
| 709 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/software/"
|
| 710 |
+
}
|
| 711 |
+
},
|
| 712 |
+
"purl": {
|
| 713 |
+
"tier": "important",
|
| 714 |
+
"weight": 3.0,
|
| 715 |
+
"category": "component_basic",
|
| 716 |
+
"description": "Package URL identifier",
|
| 717 |
+
"jsonpath": "$.components[0].purl",
|
| 718 |
+
"aibom_generation": {
|
| 719 |
+
"location": "$.components[0].purl",
|
| 720 |
+
"rule": "include_if_available",
|
| 721 |
+
"source_fields": [
|
| 722 |
+
"purl",
|
| 723 |
+
"package_url"
|
| 724 |
+
],
|
| 725 |
+
"validation": "recommended",
|
| 726 |
+
"data_type": "string"
|
| 727 |
+
},
|
| 728 |
+
"scoring": {
|
| 729 |
+
"points": 3.0,
|
| 730 |
+
"required_for_profiles": [
|
| 731 |
+
"standard",
|
| 732 |
+
"advanced"
|
| 733 |
+
],
|
| 734 |
+
"category_contribution": 0.15
|
| 735 |
+
},
|
| 736 |
+
"validation_message": {
|
| 737 |
+
"missing": "Missing field: purl - package URL for identification",
|
| 738 |
+
"recommendation": "Add a Package URL (PURL) for the model"
|
| 739 |
+
},
|
| 740 |
+
"reference_urls": {
|
| 741 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_purl",
|
| 742 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_purl",
|
| 743 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/software/Package/"
|
| 744 |
+
}
|
| 745 |
+
},
|
| 746 |
+
"description": {
|
| 747 |
+
"tier": "important",
|
| 748 |
+
"weight": 3.0,
|
| 749 |
+
"category": "component_basic",
|
| 750 |
+
"description": "Description of the AI model",
|
| 751 |
+
"jsonpath": "$.components[0].description",
|
| 752 |
+
"aibom_generation": {
|
| 753 |
+
"location": "$.components[0].description",
|
| 754 |
+
"rule": "include_if_available",
|
| 755 |
+
"source_fields": [
|
| 756 |
+
"description",
|
| 757 |
+
"summary"
|
| 758 |
+
],
|
| 759 |
+
"validation": "recommended",
|
| 760 |
+
"data_type": "string"
|
| 761 |
+
},
|
| 762 |
+
"scoring": {
|
| 763 |
+
"points": 3.0,
|
| 764 |
+
"required_for_profiles": [
|
| 765 |
+
"standard",
|
| 766 |
+
"advanced"
|
| 767 |
+
],
|
| 768 |
+
"category_contribution": 0.15
|
| 769 |
+
},
|
| 770 |
+
"validation_message": {
|
| 771 |
+
"missing": "Missing field: description - model description helpful for understanding",
|
| 772 |
+
"recommendation": "Add a clear description of what the model does"
|
| 773 |
+
},
|
| 774 |
+
"reference_urls": {
|
| 775 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_description",
|
| 776 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_description",
|
| 777 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/software/"
|
| 778 |
+
}
|
| 779 |
+
},
|
| 780 |
+
"licenses": {
|
| 781 |
+
"tier": "important",
|
| 782 |
+
"weight": 3.0,
|
| 783 |
+
"category": "component_basic",
|
| 784 |
+
"description": "License information for the model",
|
| 785 |
+
"jsonpath": "$.components[0].licenses",
|
| 786 |
+
"aibom_generation": {
|
| 787 |
+
"location": "$.components[0].licenses",
|
| 788 |
+
"rule": "include_if_available",
|
| 789 |
+
"source_fields": [
|
| 790 |
+
"licenses",
|
| 791 |
+
"license"
|
| 792 |
+
],
|
| 793 |
+
"validation": "recommended",
|
| 794 |
+
"data_type": "array"
|
| 795 |
+
},
|
| 796 |
+
"scoring": {
|
| 797 |
+
"points": 3.0,
|
| 798 |
+
"required_for_profiles": [
|
| 799 |
+
"standard",
|
| 800 |
+
"advanced"
|
| 801 |
+
],
|
| 802 |
+
"category_contribution": 0.15
|
| 803 |
+
},
|
| 804 |
+
"validation_message": {
|
| 805 |
+
"missing": "Missing field: licenses - license information important for compliance",
|
| 806 |
+
"recommendation": "Add license information for the model"
|
| 807 |
+
},
|
| 808 |
+
"reference_urls": {
|
| 809 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_licenses",
|
| 810 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_licenses",
|
| 811 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/simple_licensing/"
|
| 812 |
+
}
|
| 813 |
+
},
|
| 814 |
+
"ethicalConsiderations": {
|
| 815 |
+
"tier": "important",
|
| 816 |
+
"weight": 2.0,
|
| 817 |
+
"category": "component_model_card",
|
| 818 |
+
"description": "Ethical considerations and fairness assessments",
|
| 819 |
+
"jsonpath": "$.component.modelCard.considerations.ethicalConsiderations[0].description",
|
| 820 |
+
"aibom_generation": {
|
| 821 |
+
"location": "$.component.modelCard.considerations.ethicalConsiderations",
|
| 822 |
+
"rule": "include_if_available",
|
| 823 |
+
"source_fields": [
|
| 824 |
+
"ethicalConsiderations",
|
| 825 |
+
"ethics",
|
| 826 |
+
"fairness"
|
| 827 |
+
],
|
| 828 |
+
"validation": "optional",
|
| 829 |
+
"data_type": "string"
|
| 830 |
+
},
|
| 831 |
+
"scoring": {
|
| 832 |
+
"points": 2.0,
|
| 833 |
+
"required_for_profiles": [
|
| 834 |
+
"advanced"
|
| 835 |
+
],
|
| 836 |
+
"category_contribution": 0.067
|
| 837 |
+
},
|
| 838 |
+
"validation_message": {
|
| 839 |
+
"missing": "Missing field: ethicalConsiderations - ethical information is critical",
|
| 840 |
+
"recommendation": "Add ethical considerations or fairness assessments"
|
| 841 |
+
},
|
| 842 |
+
"reference_urls": {
|
| 843 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_considerations_ethicalConsiderations",
|
| 844 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_considerations_ethicalConsiderations",
|
| 845 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 846 |
+
}
|
| 847 |
+
},
|
| 848 |
+
"energyConsumption": {
|
| 849 |
+
"tier": "important",
|
| 850 |
+
"weight": 2.0,
|
| 851 |
+
"category": "component_model_card",
|
| 852 |
+
"description": "Energy consumption information",
|
| 853 |
+
"jsonpath": "$.metadata.properties[?(@.name=='energyConsumption')].value",
|
| 854 |
+
"aibom_generation": {
|
| 855 |
+
"location": "$.metadata.properties",
|
| 856 |
+
"rule": "include_if_available",
|
| 857 |
+
"source_fields": [
|
| 858 |
+
"energyConsumption",
|
| 859 |
+
"energy_usage"
|
| 860 |
+
],
|
| 861 |
+
"validation": "optional",
|
| 862 |
+
"data_type": "string"
|
| 863 |
+
},
|
| 864 |
+
"scoring": {
|
| 865 |
+
"points": 2.0,
|
| 866 |
+
"required_for_profiles": [
|
| 867 |
+
"advanced"
|
| 868 |
+
],
|
| 869 |
+
"category_contribution": 0.067
|
| 870 |
+
},
|
| 871 |
+
"validation_message": {
|
| 872 |
+
"missing": "Missing field: energyConsumption - energy usage information helpful for sustainability",
|
| 873 |
+
"recommendation": "Add information about the model's energy consumption"
|
| 874 |
+
},
|
| 875 |
+
"reference_urls": {
|
| 876 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_considerations_environmentalConsiderations_energyConsumptions",
|
| 877 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_considerations_environmentalConsiderations_energyConsumptions",
|
| 878 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 879 |
+
}
|
| 880 |
+
},
|
| 881 |
+
"hyperparameter": {
|
| 882 |
+
"tier": "important",
|
| 883 |
+
"weight": 2.0,
|
| 884 |
+
"category": "component_model_card",
|
| 885 |
+
"description": "Key hyperparameters of the model architecture",
|
| 886 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:hyperparameter')].value",
|
| 887 |
+
"aibom_generation": {
|
| 888 |
+
"location": "$.metadata.properties",
|
| 889 |
+
"rule": "include_if_available",
|
| 890 |
+
"source_fields": [
|
| 891 |
+
"hyperparameter",
|
| 892 |
+
"hyperparameters",
|
| 893 |
+
"training_params"
|
| 894 |
+
],
|
| 895 |
+
"validation": "optional",
|
| 896 |
+
"data_type": "string"
|
| 897 |
+
},
|
| 898 |
+
"scoring": {
|
| 899 |
+
"points": 2.0,
|
| 900 |
+
"required_for_profiles": [
|
| 901 |
+
"advanced"
|
| 902 |
+
],
|
| 903 |
+
"category_contribution": 0.067
|
| 904 |
+
},
|
| 905 |
+
"validation_message": {
|
| 906 |
+
"missing": "Missing field: hyperparameter - training configuration helpful for reproducibility",
|
| 907 |
+
"recommendation": "Add key hyperparameters used during model training"
|
| 908 |
+
},
|
| 909 |
+
"reference_urls": {
|
| 910 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/model/AI/Classes/AIPackage/#AI-hyperparameter",
|
| 911 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 912 |
+
}
|
| 913 |
+
},
|
| 914 |
+
"technicalLimitations": {
|
| 915 |
+
"tier": "important",
|
| 916 |
+
"weight": 2.0,
|
| 917 |
+
"category": "component_model_card",
|
| 918 |
+
"description": "Known limitations of the model",
|
| 919 |
+
"jsonpath": "$.component.modelCard.considerations.technicalLimitations[0]",
|
| 920 |
+
"aibom_generation": {
|
| 921 |
+
"location": "$.component.modelCard.considerations.technicalLimitations",
|
| 922 |
+
"rule": "include_if_available",
|
| 923 |
+
"source_fields": [
|
| 924 |
+
"technicalLimitations",
|
| 925 |
+
"limitations",
|
| 926 |
+
"known_issues"
|
| 927 |
+
],
|
| 928 |
+
"validation": "optional",
|
| 929 |
+
"data_type": "string"
|
| 930 |
+
},
|
| 931 |
+
"scoring": {
|
| 932 |
+
"points": 2.0,
|
| 933 |
+
"required_for_profiles": [
|
| 934 |
+
"advanced"
|
| 935 |
+
],
|
| 936 |
+
"category_contribution": 0.067
|
| 937 |
+
},
|
| 938 |
+
"validation_message": {
|
| 939 |
+
"missing": "Missing field: technicalLimitations - limitations information helpful for safety",
|
| 940 |
+
"recommendation": "Add known technical limitations of the model"
|
| 941 |
+
},
|
| 942 |
+
"reference_urls": {
|
| 943 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_considerations_technicalLimitations",
|
| 944 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_considerations_technicalLimitations",
|
| 945 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 946 |
+
}
|
| 947 |
+
},
|
| 948 |
+
"safetyRiskAssessment": {
|
| 949 |
+
"tier": "important",
|
| 950 |
+
"weight": 2.0,
|
| 951 |
+
"category": "component_model_card",
|
| 952 |
+
"description": "Safety and risk assessment information",
|
| 953 |
+
"jsonpath": "$.metadata.properties[?(@.name=='safetyRiskAssessment')].value",
|
| 954 |
+
"aibom_generation": {
|
| 955 |
+
"location": "$.metadata.properties",
|
| 956 |
+
"rule": "include_if_available",
|
| 957 |
+
"source_fields": [
|
| 958 |
+
"safetyRiskAssessment",
|
| 959 |
+
"safety_assessment",
|
| 960 |
+
"risk_analysis"
|
| 961 |
+
],
|
| 962 |
+
"validation": "optional",
|
| 963 |
+
"data_type": "string"
|
| 964 |
+
},
|
| 965 |
+
"scoring": {
|
| 966 |
+
"points": 2.0,
|
| 967 |
+
"required_for_profiles": [
|
| 968 |
+
"advanced"
|
| 969 |
+
],
|
| 970 |
+
"category_contribution": 0.067
|
| 971 |
+
},
|
| 972 |
+
"validation_message": {
|
| 973 |
+
"missing": "Missing field: safetyRiskAssessment - safety assessment important for responsible deployment",
|
| 974 |
+
"recommendation": "Add safety and risk assessment information"
|
| 975 |
+
},
|
| 976 |
+
"reference_urls": {
|
| 977 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/model/AI/Classes/AIPackage/#AI-safetyRiskAssessment",
|
| 978 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 979 |
+
}
|
| 980 |
+
},
|
| 981 |
+
"intendedUse": {
|
| 982 |
+
"tier": "important",
|
| 983 |
+
"weight": 2.0,
|
| 984 |
+
"category": "component_model_card",
|
| 985 |
+
"description": "Intended use cases for the model",
|
| 986 |
+
"jsonpath": "$.component.modelCard.considerations.useCases[0]",
|
| 987 |
+
"aibom_generation": {
|
| 988 |
+
"location": "$.component.modelCard.considerations.useCases",
|
| 989 |
+
"rule": "include_if_available",
|
| 990 |
+
"source_fields": [
|
| 991 |
+
"intendedUse",
|
| 992 |
+
"use_cases",
|
| 993 |
+
"applications"
|
| 994 |
+
],
|
| 995 |
+
"validation": "optional",
|
| 996 |
+
"data_type": "string"
|
| 997 |
+
},
|
| 998 |
+
"scoring": {
|
| 999 |
+
"points": 2.0,
|
| 1000 |
+
"required_for_profiles": [
|
| 1001 |
+
"advanced"
|
| 1002 |
+
],
|
| 1003 |
+
"category_contribution": 0.067
|
| 1004 |
+
},
|
| 1005 |
+
"validation_message": {
|
| 1006 |
+
"missing": "Missing field: intendedUse - intended use information helpful for context",
|
| 1007 |
+
"recommendation": "Add intended use cases for the model"
|
| 1008 |
+
},
|
| 1009 |
+
"reference_urls": {
|
| 1010 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_considerations_useCases",
|
| 1011 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_considerations_useCases",
|
| 1012 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 1013 |
+
}
|
| 1014 |
+
},
|
| 1015 |
+
"typeOfModel": {
|
| 1016 |
+
"tier": "important",
|
| 1017 |
+
"weight": 2.0,
|
| 1018 |
+
"category": "component_model_card",
|
| 1019 |
+
"description": "Type or architecture of the model",
|
| 1020 |
+
"jsonpath": "$.components[0].modelCard.modelParameters.modelArchitecture",
|
| 1021 |
+
"aibom_generation": {
|
| 1022 |
+
"location": "$.components[0].modelCard.modelParameters.modelArchitecture",
|
| 1023 |
+
"rule": "include_if_available",
|
| 1024 |
+
"source_fields": [
|
| 1025 |
+
"typeOfModel",
|
| 1026 |
+
"model_type",
|
| 1027 |
+
"architecture"
|
| 1028 |
+
],
|
| 1029 |
+
"validation": "recommended",
|
| 1030 |
+
"data_type": "string"
|
| 1031 |
+
},
|
| 1032 |
+
"scoring": {
|
| 1033 |
+
"points": 2.0,
|
| 1034 |
+
"required_for_profiles": [
|
| 1035 |
+
"advanced"
|
| 1036 |
+
],
|
| 1037 |
+
"category_contribution": 0.067
|
| 1038 |
+
},
|
| 1039 |
+
"validation_message": {
|
| 1040 |
+
"missing": "Missing field: typeOfModel - model architecture information helpful",
|
| 1041 |
+
"recommendation": "Add the type or architecture of the model (e.g., Transformer, CNN)"
|
| 1042 |
+
},
|
| 1043 |
+
"reference_urls": {
|
| 1044 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_modelParameters_approach",
|
| 1045 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_modelParameters_approach",
|
| 1046 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 1047 |
+
}
|
| 1048 |
+
},
|
| 1049 |
+
"modelExplainability": {
|
| 1050 |
+
"tier": "supplementary",
|
| 1051 |
+
"weight": 1.0,
|
| 1052 |
+
"category": "component_model_card",
|
| 1053 |
+
"description": "Information about model explainability",
|
| 1054 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:modelCardExplainability')].value",
|
| 1055 |
+
"aibom_generation": {
|
| 1056 |
+
"location": "$.metadata.properties",
|
| 1057 |
+
"rule": "include_if_available",
|
| 1058 |
+
"source_fields": [
|
| 1059 |
+
"modelExplainability",
|
| 1060 |
+
"explainability",
|
| 1061 |
+
"interpretability"
|
| 1062 |
+
],
|
| 1063 |
+
"validation": "optional",
|
| 1064 |
+
"data_type": "string"
|
| 1065 |
+
},
|
| 1066 |
+
"scoring": {
|
| 1067 |
+
"points": 1.0,
|
| 1068 |
+
"required_for_profiles": [
|
| 1069 |
+
"advanced"
|
| 1070 |
+
],
|
| 1071 |
+
"category_contribution": 0.033
|
| 1072 |
+
},
|
| 1073 |
+
"validation_message": {
|
| 1074 |
+
"missing": "Missing supplementary field: modelExplainability - explainability information helpful for transparency",
|
| 1075 |
+
"recommendation": "Add information about model explainability or interpretability features"
|
| 1076 |
+
},
|
| 1077 |
+
"reference_urls": {
|
| 1078 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/model/AI/Classes/AIPackage/#AI-modelExplainability",
|
| 1079 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1080 |
+
}
|
| 1081 |
+
},
|
| 1082 |
+
"energyQuantity": {
|
| 1083 |
+
"tier": "supplementary",
|
| 1084 |
+
"weight": 1.0,
|
| 1085 |
+
"category": "component_model_card",
|
| 1086 |
+
"description": "Quantitative energy consumption data",
|
| 1087 |
+
"jsonpath": "$.metadata.properties[?(@.name=='energyQuantity')].value",
|
| 1088 |
+
"aibom_generation": {
|
| 1089 |
+
"location": "$.metadata.properties",
|
| 1090 |
+
"rule": "include_if_available",
|
| 1091 |
+
"source_fields": [
|
| 1092 |
+
"energyQuantity",
|
| 1093 |
+
"energy_amount"
|
| 1094 |
+
],
|
| 1095 |
+
"validation": "optional",
|
| 1096 |
+
"data_type": "number"
|
| 1097 |
+
},
|
| 1098 |
+
"scoring": {
|
| 1099 |
+
"points": 1.0,
|
| 1100 |
+
"required_for_profiles": [
|
| 1101 |
+
"advanced"
|
| 1102 |
+
],
|
| 1103 |
+
"category_contribution": 0.033
|
| 1104 |
+
},
|
| 1105 |
+
"validation_message": {
|
| 1106 |
+
"missing": "Missing supplementary field: energyQuantity - quantitative energy data helpful for sustainability metrics",
|
| 1107 |
+
"recommendation": "Add specific energy consumption quantities"
|
| 1108 |
+
},
|
| 1109 |
+
"reference_urls": {
|
| 1110 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_considerations_environmentalConsiderations_energyConsumptions_items_activityEnergyCost_value",
|
| 1111 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_considerations_environmentalConsiderations_energyConsumptions_items_activityEnergyCost_value",
|
| 1112 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 1113 |
+
}
|
| 1114 |
+
},
|
| 1115 |
+
"energyUnit": {
|
| 1116 |
+
"tier": "supplementary",
|
| 1117 |
+
"weight": 1.0,
|
| 1118 |
+
"category": "component_model_card",
|
| 1119 |
+
"description": "Unit of measurement for energy consumption",
|
| 1120 |
+
"jsonpath": "$.metadata.properties[?(@.name=='energyUnit')].value",
|
| 1121 |
+
"aibom_generation": {
|
| 1122 |
+
"location": "$.metadata.properties",
|
| 1123 |
+
"rule": "include_if_available",
|
| 1124 |
+
"source_fields": [
|
| 1125 |
+
"energyUnit",
|
| 1126 |
+
"energy_unit"
|
| 1127 |
+
],
|
| 1128 |
+
"validation": "optional",
|
| 1129 |
+
"data_type": "string"
|
| 1130 |
+
},
|
| 1131 |
+
"scoring": {
|
| 1132 |
+
"points": 1.0,
|
| 1133 |
+
"required_for_profiles": [
|
| 1134 |
+
"advanced"
|
| 1135 |
+
],
|
| 1136 |
+
"category_contribution": 0.033
|
| 1137 |
+
},
|
| 1138 |
+
"validation_message": {
|
| 1139 |
+
"missing": "Missing supplementary field: energyUnit - energy measurement unit helpful for standardization",
|
| 1140 |
+
"recommendation": "Add the unit of measurement for energy consumption (e.g., kWh, Joules)"
|
| 1141 |
+
},
|
| 1142 |
+
"reference_urls": {
|
| 1143 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_considerations_environmentalConsiderations_energyConsumptions_items_activityEnergyCost_unit",
|
| 1144 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_considerations_environmentalConsiderations_energyConsumptions_items_activityEnergyCost_unit",
|
| 1145 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 1146 |
+
}
|
| 1147 |
+
},
|
| 1148 |
+
"informationAboutTraining": {
|
| 1149 |
+
"tier": "supplementary",
|
| 1150 |
+
"weight": 1.0,
|
| 1151 |
+
"category": "component_model_card",
|
| 1152 |
+
"description": "Information about the training process",
|
| 1153 |
+
"jsonpath": "$.metadata.properties[?(@.name=='informationAboutTraining')].value",
|
| 1154 |
+
"aibom_generation": {
|
| 1155 |
+
"location": "$.metadata.properties",
|
| 1156 |
+
"rule": "include_if_available",
|
| 1157 |
+
"source_fields": [
|
| 1158 |
+
"informationAboutTraining",
|
| 1159 |
+
"training_info",
|
| 1160 |
+
"training_details"
|
| 1161 |
+
],
|
| 1162 |
+
"validation": "optional",
|
| 1163 |
+
"data_type": "string"
|
| 1164 |
+
},
|
| 1165 |
+
"scoring": {
|
| 1166 |
+
"points": 1.0,
|
| 1167 |
+
"required_for_profiles": [
|
| 1168 |
+
"advanced"
|
| 1169 |
+
],
|
| 1170 |
+
"category_contribution": 0.033
|
| 1171 |
+
},
|
| 1172 |
+
"validation_message": {
|
| 1173 |
+
"missing": "Missing supplementary field: informationAboutTraining - training details helpful for understanding model development",
|
| 1174 |
+
"recommendation": "Add information about the training process and methodology"
|
| 1175 |
+
},
|
| 1176 |
+
"reference_urls": {
|
| 1177 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/model/AI/Classes/AIPackage/#AI-informationAboutTraining",
|
| 1178 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1179 |
+
}
|
| 1180 |
+
},
|
| 1181 |
+
"informationAboutApplication": {
|
| 1182 |
+
"tier": "supplementary",
|
| 1183 |
+
"weight": 1.0,
|
| 1184 |
+
"category": "component_model_card",
|
| 1185 |
+
"description": "Information about intended applications",
|
| 1186 |
+
"jsonpath": "$.metadata.properties[?(@.name=='informationAboutApplication')].value",
|
| 1187 |
+
"aibom_generation": {
|
| 1188 |
+
"location": "$.metadata.properties",
|
| 1189 |
+
"rule": "include_if_available",
|
| 1190 |
+
"source_fields": [
|
| 1191 |
+
"informationAboutApplication",
|
| 1192 |
+
"application_info",
|
| 1193 |
+
"intended_use"
|
| 1194 |
+
],
|
| 1195 |
+
"validation": "optional",
|
| 1196 |
+
"data_type": "string"
|
| 1197 |
+
},
|
| 1198 |
+
"scoring": {
|
| 1199 |
+
"points": 1.0,
|
| 1200 |
+
"required_for_profiles": [
|
| 1201 |
+
"advanced"
|
| 1202 |
+
],
|
| 1203 |
+
"category_contribution": 0.033
|
| 1204 |
+
},
|
| 1205 |
+
"validation_message": {
|
| 1206 |
+
"missing": "Missing supplementary field: informationAboutApplication - application guidance helpful for proper usage",
|
| 1207 |
+
"recommendation": "Add information about intended applications and use cases"
|
| 1208 |
+
},
|
| 1209 |
+
"reference_urls": {
|
| 1210 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_considerations_useCases",
|
| 1211 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_considerations_useCases",
|
| 1212 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 1213 |
+
}
|
| 1214 |
+
},
|
| 1215 |
+
"metric": {
|
| 1216 |
+
"tier": "supplementary",
|
| 1217 |
+
"weight": 1.0,
|
| 1218 |
+
"category": "component_model_card",
|
| 1219 |
+
"description": "Performance metrics and evaluation results",
|
| 1220 |
+
"jsonpath": "$.metadata.properties[?(@.name=='metric')].value",
|
| 1221 |
+
"aibom_generation": {
|
| 1222 |
+
"location": "$.metadata.properties",
|
| 1223 |
+
"rule": "include_if_available",
|
| 1224 |
+
"source_fields": [
|
| 1225 |
+
"metric",
|
| 1226 |
+
"metrics",
|
| 1227 |
+
"performance"
|
| 1228 |
+
],
|
| 1229 |
+
"validation": "optional",
|
| 1230 |
+
"data_type": "string"
|
| 1231 |
+
},
|
| 1232 |
+
"scoring": {
|
| 1233 |
+
"points": 1.0,
|
| 1234 |
+
"required_for_profiles": [
|
| 1235 |
+
"advanced"
|
| 1236 |
+
],
|
| 1237 |
+
"category_contribution": 0.033
|
| 1238 |
+
},
|
| 1239 |
+
"validation_message": {
|
| 1240 |
+
"missing": "Missing supplementary field: metric - performance metrics helpful for evaluation",
|
| 1241 |
+
"recommendation": "Add performance metrics and evaluation results"
|
| 1242 |
+
},
|
| 1243 |
+
"reference_urls": {
|
| 1244 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_modelCard_quantitativeAnalysis_performanceMetrics",
|
| 1245 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_modelCard_quantitativeAnalysis_performanceMetrics",
|
| 1246 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 1247 |
+
}
|
| 1248 |
+
},
|
| 1249 |
+
"metricDecisionThreshold": {
|
| 1250 |
+
"tier": "supplementary",
|
| 1251 |
+
"weight": 1.0,
|
| 1252 |
+
"category": "component_model_card",
|
| 1253 |
+
"description": "Decision thresholds for metrics",
|
| 1254 |
+
"jsonpath": "$.metadata.properties[?(@.name=='metricDecisionThreshold')].value",
|
| 1255 |
+
"aibom_generation": {
|
| 1256 |
+
"location": "$.metadata.properties",
|
| 1257 |
+
"rule": "include_if_available",
|
| 1258 |
+
"source_fields": [
|
| 1259 |
+
"metricDecisionThreshold",
|
| 1260 |
+
"decision_threshold",
|
| 1261 |
+
"threshold"
|
| 1262 |
+
],
|
| 1263 |
+
"validation": "optional",
|
| 1264 |
+
"data_type": "number"
|
| 1265 |
+
},
|
| 1266 |
+
"scoring": {
|
| 1267 |
+
"points": 1.0,
|
| 1268 |
+
"required_for_profiles": [
|
| 1269 |
+
"advanced"
|
| 1270 |
+
],
|
| 1271 |
+
"category_contribution": 0.033
|
| 1272 |
+
},
|
| 1273 |
+
"validation_message": {
|
| 1274 |
+
"missing": "Missing supplementary field: metricDecisionThreshold - decision thresholds helpful for operational guidance",
|
| 1275 |
+
"recommendation": "Add decision thresholds for performance metrics"
|
| 1276 |
+
},
|
| 1277 |
+
"reference_urls": {
|
| 1278 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/model/AI/Classes/AIPackage/#AI-metricDecisionThreshold",
|
| 1279 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1280 |
+
}
|
| 1281 |
+
},
|
| 1282 |
+
"modelDataPreprocessing": {
|
| 1283 |
+
"tier": "supplementary",
|
| 1284 |
+
"weight": 1.0,
|
| 1285 |
+
"category": "component_model_card",
|
| 1286 |
+
"description": "Data preprocessing information",
|
| 1287 |
+
"jsonpath": "$.metadata.properties[?(@.name=='modelDataPreprocessing')].value",
|
| 1288 |
+
"aibom_generation": {
|
| 1289 |
+
"location": "$.metadata.properties",
|
| 1290 |
+
"rule": "include_if_available",
|
| 1291 |
+
"source_fields": [
|
| 1292 |
+
"modelDataPreprocessing",
|
| 1293 |
+
"data_preprocessing",
|
| 1294 |
+
"preprocessing"
|
| 1295 |
+
],
|
| 1296 |
+
"validation": "optional",
|
| 1297 |
+
"data_type": "string"
|
| 1298 |
+
},
|
| 1299 |
+
"scoring": {
|
| 1300 |
+
"points": 1.0,
|
| 1301 |
+
"required_for_profiles": [
|
| 1302 |
+
"advanced"
|
| 1303 |
+
],
|
| 1304 |
+
"category_contribution": 0.033
|
| 1305 |
+
},
|
| 1306 |
+
"validation_message": {
|
| 1307 |
+
"missing": "Missing supplementary field: modelDataPreprocessing - preprocessing details helpful for reproducibility",
|
| 1308 |
+
"recommendation": "Add information about data preprocessing steps"
|
| 1309 |
+
},
|
| 1310 |
+
"reference_urls": {
|
| 1311 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/model/AI/Classes/AIPackage/#AI-modelDataPreprocessing",
|
| 1312 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1313 |
+
}
|
| 1314 |
+
},
|
| 1315 |
+
"useSensitivePersonalInformation": {
|
| 1316 |
+
"tier": "supplementary",
|
| 1317 |
+
"weight": 1.0,
|
| 1318 |
+
"category": "component_model_card",
|
| 1319 |
+
"description": "Information about use of sensitive personal data",
|
| 1320 |
+
"jsonpath": "$.metadata.properties[?(@.name=='useSensitivePersonalInformation')].value",
|
| 1321 |
+
"aibom_generation": {
|
| 1322 |
+
"location": "$.metadata.properties",
|
| 1323 |
+
"rule": "include_if_available",
|
| 1324 |
+
"source_fields": [
|
| 1325 |
+
"useSensitivePersonalInformation",
|
| 1326 |
+
"sensitive_data",
|
| 1327 |
+
"personal_data"
|
| 1328 |
+
],
|
| 1329 |
+
"validation": "optional",
|
| 1330 |
+
"data_type": "boolean"
|
| 1331 |
+
},
|
| 1332 |
+
"scoring": {
|
| 1333 |
+
"points": 1.0,
|
| 1334 |
+
"required_for_profiles": [
|
| 1335 |
+
"advanced"
|
| 1336 |
+
],
|
| 1337 |
+
"category_contribution": 0.033
|
| 1338 |
+
},
|
| 1339 |
+
"validation_message": {
|
| 1340 |
+
"missing": "Missing supplementary field: useSensitivePersonalInformation - privacy information important for compliance",
|
| 1341 |
+
"recommendation": "Add information about use of sensitive or personal data"
|
| 1342 |
+
},
|
| 1343 |
+
"reference_urls": {
|
| 1344 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/model/AI/Classes/AIPackage/#AI-useSensitivePersonalInformation",
|
| 1345 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1346 |
+
}
|
| 1347 |
+
},
|
| 1348 |
+
"downloadLocation": {
|
| 1349 |
+
"tier": "important",
|
| 1350 |
+
"weight": 3.0,
|
| 1351 |
+
"category": "external_references",
|
| 1352 |
+
"description": "URL to download the model",
|
| 1353 |
+
"jsonpath": "$.components[0].externalReferences[?(@.type=='distribution' || @.type=='website')].url",
|
| 1354 |
+
"aibom_generation": {
|
| 1355 |
+
"location": "$.component.externalReferences",
|
| 1356 |
+
"rule": "include_if_available",
|
| 1357 |
+
"source_fields": [
|
| 1358 |
+
"downloadLocation",
|
| 1359 |
+
"download_url",
|
| 1360 |
+
"model_url"
|
| 1361 |
+
],
|
| 1362 |
+
"validation": "recommended",
|
| 1363 |
+
"data_type": "string"
|
| 1364 |
+
},
|
| 1365 |
+
"scoring": {
|
| 1366 |
+
"points": 3.0,
|
| 1367 |
+
"required_for_profiles": [
|
| 1368 |
+
"standard",
|
| 1369 |
+
"advanced"
|
| 1370 |
+
],
|
| 1371 |
+
"category_contribution": 0.15
|
| 1372 |
+
},
|
| 1373 |
+
"validation_message": {
|
| 1374 |
+
"missing": "Missing field: downloadLocation - model download URL required",
|
| 1375 |
+
"recommendation": "Add a URL where the model can be downloaded"
|
| 1376 |
+
},
|
| 1377 |
+
"reference_urls": {
|
| 1378 |
+
"cyclonedx_1.6": "https://cyclonedx.org/docs/1.6/json/#components_items_externalReferences",
|
| 1379 |
+
"cyclonedx_1.7": "https://cyclonedx.org/docs/1.7/json/#components_items_externalReferences",
|
| 1380 |
+
"spdx_3.1": "https://spdx.github.io/spdx-spec/v3.1-RC1/ai/"
|
| 1381 |
+
}
|
| 1382 |
+
},
|
| 1383 |
+
"vocab_size": {
|
| 1384 |
+
"tier": "supplementary",
|
| 1385 |
+
"weight": 1.0,
|
| 1386 |
+
"category": "component_model_card",
|
| 1387 |
+
"description": "Expected size of the model's vocabulary",
|
| 1388 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:vocabSize')].value",
|
| 1389 |
+
"aibom_generation": {
|
| 1390 |
+
"location": "$.components[0].properties",
|
| 1391 |
+
"rule": "include_if_available",
|
| 1392 |
+
"source_fields": [
|
| 1393 |
+
"vocab_size"
|
| 1394 |
+
],
|
| 1395 |
+
"validation": "optional",
|
| 1396 |
+
"data_type": "integer"
|
| 1397 |
+
},
|
| 1398 |
+
"scoring": {
|
| 1399 |
+
"points": 1.0,
|
| 1400 |
+
"required_for_profiles": [
|
| 1401 |
+
"advanced"
|
| 1402 |
+
],
|
| 1403 |
+
"category_contribution": 0.033
|
| 1404 |
+
},
|
| 1405 |
+
"validation_message": {
|
| 1406 |
+
"missing": "Missing supplementary field: vocab_size - GGUF model properties helpful for reproducibility",
|
| 1407 |
+
"recommendation": "Add Vocabulary Size"
|
| 1408 |
+
},
|
| 1409 |
+
"reference_urls": {
|
| 1410 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1411 |
+
}
|
| 1412 |
+
},
|
| 1413 |
+
"tokenizer_class": {
|
| 1414 |
+
"tier": "supplementary",
|
| 1415 |
+
"weight": 1.0,
|
| 1416 |
+
"category": "component_model_card",
|
| 1417 |
+
"description": "The specific tokenizer class or method used",
|
| 1418 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:tokenizerClass')].value",
|
| 1419 |
+
"aibom_generation": {
|
| 1420 |
+
"location": "$.components[0].properties",
|
| 1421 |
+
"rule": "include_if_available",
|
| 1422 |
+
"source_fields": [
|
| 1423 |
+
"tokenizer_class"
|
| 1424 |
+
],
|
| 1425 |
+
"validation": "optional",
|
| 1426 |
+
"data_type": "string"
|
| 1427 |
+
},
|
| 1428 |
+
"scoring": {
|
| 1429 |
+
"points": 1.0,
|
| 1430 |
+
"required_for_profiles": [
|
| 1431 |
+
"advanced"
|
| 1432 |
+
],
|
| 1433 |
+
"category_contribution": 0.033
|
| 1434 |
+
},
|
| 1435 |
+
"validation_message": {
|
| 1436 |
+
"missing": "Missing supplementary field: tokenizer_class - GGUF model properties helpful for reproducibility",
|
| 1437 |
+
"recommendation": "Add Tokenizer Class"
|
| 1438 |
+
},
|
| 1439 |
+
"reference_urls": {
|
| 1440 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1441 |
+
}
|
| 1442 |
+
},
|
| 1443 |
+
"context_length": {
|
| 1444 |
+
"tier": "supplementary",
|
| 1445 |
+
"weight": 1.0,
|
| 1446 |
+
"category": "component_model_card",
|
| 1447 |
+
"description": "Maximum context length or sequence length supported",
|
| 1448 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:contextLength')].value",
|
| 1449 |
+
"aibom_generation": {
|
| 1450 |
+
"location": "$.components[0].properties",
|
| 1451 |
+
"rule": "include_if_available",
|
| 1452 |
+
"source_fields": [
|
| 1453 |
+
"context_length"
|
| 1454 |
+
],
|
| 1455 |
+
"validation": "optional",
|
| 1456 |
+
"data_type": "integer"
|
| 1457 |
+
},
|
| 1458 |
+
"scoring": {
|
| 1459 |
+
"points": 1.0,
|
| 1460 |
+
"required_for_profiles": [
|
| 1461 |
+
"advanced"
|
| 1462 |
+
],
|
| 1463 |
+
"category_contribution": 0.033
|
| 1464 |
+
},
|
| 1465 |
+
"validation_message": {
|
| 1466 |
+
"missing": "Missing supplementary field: context_length - GGUF model properties helpful for reproducibility",
|
| 1467 |
+
"recommendation": "Add Context Length"
|
| 1468 |
+
},
|
| 1469 |
+
"reference_urls": {
|
| 1470 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1471 |
+
}
|
| 1472 |
+
},
|
| 1473 |
+
"embedding_length": {
|
| 1474 |
+
"tier": "supplementary",
|
| 1475 |
+
"weight": 1.0,
|
| 1476 |
+
"category": "component_model_card",
|
| 1477 |
+
"description": "Vector length of the token embeddings",
|
| 1478 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:embeddingLength')].value",
|
| 1479 |
+
"aibom_generation": {
|
| 1480 |
+
"location": "$.components[0].properties",
|
| 1481 |
+
"rule": "include_if_available",
|
| 1482 |
+
"source_fields": [
|
| 1483 |
+
"embedding_length"
|
| 1484 |
+
],
|
| 1485 |
+
"validation": "optional",
|
| 1486 |
+
"data_type": "integer"
|
| 1487 |
+
},
|
| 1488 |
+
"scoring": {
|
| 1489 |
+
"points": 1.0,
|
| 1490 |
+
"required_for_profiles": [
|
| 1491 |
+
"advanced"
|
| 1492 |
+
],
|
| 1493 |
+
"category_contribution": 0.033
|
| 1494 |
+
},
|
| 1495 |
+
"validation_message": {
|
| 1496 |
+
"missing": "Missing supplementary field: embedding_length - GGUF model properties helpful for reproducibility",
|
| 1497 |
+
"recommendation": "Add Embedding Length"
|
| 1498 |
+
},
|
| 1499 |
+
"reference_urls": {
|
| 1500 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1501 |
+
}
|
| 1502 |
+
},
|
| 1503 |
+
"block_count": {
|
| 1504 |
+
"tier": "supplementary",
|
| 1505 |
+
"weight": 1.0,
|
| 1506 |
+
"category": "component_model_card",
|
| 1507 |
+
"description": "Number of transformer blocks or layers",
|
| 1508 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:blockCount')].value",
|
| 1509 |
+
"aibom_generation": {
|
| 1510 |
+
"location": "$.components[0].properties",
|
| 1511 |
+
"rule": "include_if_available",
|
| 1512 |
+
"source_fields": [
|
| 1513 |
+
"block_count"
|
| 1514 |
+
],
|
| 1515 |
+
"validation": "optional",
|
| 1516 |
+
"data_type": "integer"
|
| 1517 |
+
},
|
| 1518 |
+
"scoring": {
|
| 1519 |
+
"points": 1.0,
|
| 1520 |
+
"required_for_profiles": [
|
| 1521 |
+
"advanced"
|
| 1522 |
+
],
|
| 1523 |
+
"category_contribution": 0.033
|
| 1524 |
+
},
|
| 1525 |
+
"validation_message": {
|
| 1526 |
+
"missing": "Missing supplementary field: block_count - GGUF model properties helpful for reproducibility",
|
| 1527 |
+
"recommendation": "Add Block Count"
|
| 1528 |
+
},
|
| 1529 |
+
"reference_urls": {
|
| 1530 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1531 |
+
}
|
| 1532 |
+
},
|
| 1533 |
+
"attention_head_count": {
|
| 1534 |
+
"tier": "supplementary",
|
| 1535 |
+
"weight": 1.0,
|
| 1536 |
+
"category": "component_model_card",
|
| 1537 |
+
"description": "Number of attention heads in the model",
|
| 1538 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:attentionHeadCount')].value",
|
| 1539 |
+
"aibom_generation": {
|
| 1540 |
+
"location": "$.components[0].properties",
|
| 1541 |
+
"rule": "include_if_available",
|
| 1542 |
+
"source_fields": [
|
| 1543 |
+
"attention_head_count"
|
| 1544 |
+
],
|
| 1545 |
+
"validation": "optional",
|
| 1546 |
+
"data_type": "integer"
|
| 1547 |
+
},
|
| 1548 |
+
"scoring": {
|
| 1549 |
+
"points": 1.0,
|
| 1550 |
+
"required_for_profiles": [
|
| 1551 |
+
"advanced"
|
| 1552 |
+
],
|
| 1553 |
+
"category_contribution": 0.033
|
| 1554 |
+
},
|
| 1555 |
+
"validation_message": {
|
| 1556 |
+
"missing": "Missing supplementary field: attention_head_count - GGUF model properties helpful for reproducibility",
|
| 1557 |
+
"recommendation": "Add Attention Head Count"
|
| 1558 |
+
},
|
| 1559 |
+
"reference_urls": {
|
| 1560 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1561 |
+
}
|
| 1562 |
+
},
|
| 1563 |
+
"attention_head_count_kv": {
|
| 1564 |
+
"tier": "supplementary",
|
| 1565 |
+
"weight": 1.0,
|
| 1566 |
+
"category": "component_model_card",
|
| 1567 |
+
"description": "Number of Key-Value attention heads",
|
| 1568 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:attentionHeadCountKV')].value",
|
| 1569 |
+
"aibom_generation": {
|
| 1570 |
+
"location": "$.components[0].properties",
|
| 1571 |
+
"rule": "include_if_available",
|
| 1572 |
+
"source_fields": [
|
| 1573 |
+
"attention_head_count_kv"
|
| 1574 |
+
],
|
| 1575 |
+
"validation": "optional",
|
| 1576 |
+
"data_type": "integer"
|
| 1577 |
+
},
|
| 1578 |
+
"scoring": {
|
| 1579 |
+
"points": 1.0,
|
| 1580 |
+
"required_for_profiles": [
|
| 1581 |
+
"advanced"
|
| 1582 |
+
],
|
| 1583 |
+
"category_contribution": 0.033
|
| 1584 |
+
},
|
| 1585 |
+
"validation_message": {
|
| 1586 |
+
"missing": "Missing supplementary field: attention_head_count_kv - GGUF model properties helpful for reproducibility",
|
| 1587 |
+
"recommendation": "Add Attention Head Count KV"
|
| 1588 |
+
},
|
| 1589 |
+
"reference_urls": {
|
| 1590 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1591 |
+
}
|
| 1592 |
+
},
|
| 1593 |
+
"feed_forward_length": {
|
| 1594 |
+
"tier": "supplementary",
|
| 1595 |
+
"weight": 1.0,
|
| 1596 |
+
"category": "component_model_card",
|
| 1597 |
+
"description": "Dimensionality of the feed-forward network",
|
| 1598 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:feedForwardLength')].value",
|
| 1599 |
+
"aibom_generation": {
|
| 1600 |
+
"location": "$.components[0].properties",
|
| 1601 |
+
"rule": "include_if_available",
|
| 1602 |
+
"source_fields": [
|
| 1603 |
+
"feed_forward_length"
|
| 1604 |
+
],
|
| 1605 |
+
"validation": "optional",
|
| 1606 |
+
"data_type": "integer"
|
| 1607 |
+
},
|
| 1608 |
+
"scoring": {
|
| 1609 |
+
"points": 1.0,
|
| 1610 |
+
"required_for_profiles": [
|
| 1611 |
+
"advanced"
|
| 1612 |
+
],
|
| 1613 |
+
"category_contribution": 0.033
|
| 1614 |
+
},
|
| 1615 |
+
"validation_message": {
|
| 1616 |
+
"missing": "Missing supplementary field: feed_forward_length - GGUF model properties helpful for reproducibility",
|
| 1617 |
+
"recommendation": "Add Feed Forward Length"
|
| 1618 |
+
},
|
| 1619 |
+
"reference_urls": {
|
| 1620 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1621 |
+
}
|
| 1622 |
+
},
|
| 1623 |
+
"rope_dimension_count": {
|
| 1624 |
+
"tier": "supplementary",
|
| 1625 |
+
"weight": 1.0,
|
| 1626 |
+
"category": "component_model_card",
|
| 1627 |
+
"description": "Number of dimensions for Rotary Position Embedding (RoPE)",
|
| 1628 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:ropeDimensionCount')].value",
|
| 1629 |
+
"aibom_generation": {
|
| 1630 |
+
"location": "$.components[0].properties",
|
| 1631 |
+
"rule": "include_if_available",
|
| 1632 |
+
"source_fields": [
|
| 1633 |
+
"rope_dimension_count"
|
| 1634 |
+
],
|
| 1635 |
+
"validation": "optional",
|
| 1636 |
+
"data_type": "integer"
|
| 1637 |
+
},
|
| 1638 |
+
"scoring": {
|
| 1639 |
+
"points": 1.0,
|
| 1640 |
+
"required_for_profiles": [
|
| 1641 |
+
"advanced"
|
| 1642 |
+
],
|
| 1643 |
+
"category_contribution": 0.033
|
| 1644 |
+
},
|
| 1645 |
+
"validation_message": {
|
| 1646 |
+
"missing": "Missing supplementary field: rope_dimension_count - GGUF model properties helpful for reproducibility",
|
| 1647 |
+
"recommendation": "Add RoPE Dimension Count"
|
| 1648 |
+
},
|
| 1649 |
+
"reference_urls": {
|
| 1650 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1651 |
+
}
|
| 1652 |
+
},
|
| 1653 |
+
"quantization_version": {
|
| 1654 |
+
"tier": "supplementary",
|
| 1655 |
+
"weight": 1.0,
|
| 1656 |
+
"category": "component_model_card",
|
| 1657 |
+
"description": "Version or specification identifier of the quantization format",
|
| 1658 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:quantizationVersion')].value",
|
| 1659 |
+
"aibom_generation": {
|
| 1660 |
+
"location": "$.components[0].properties",
|
| 1661 |
+
"rule": "include_if_available",
|
| 1662 |
+
"source_fields": [
|
| 1663 |
+
"quantization_version"
|
| 1664 |
+
],
|
| 1665 |
+
"validation": "optional",
|
| 1666 |
+
"data_type": "integer"
|
| 1667 |
+
},
|
| 1668 |
+
"scoring": {
|
| 1669 |
+
"points": 1.0,
|
| 1670 |
+
"required_for_profiles": [
|
| 1671 |
+
"advanced"
|
| 1672 |
+
],
|
| 1673 |
+
"category_contribution": 0.033
|
| 1674 |
+
},
|
| 1675 |
+
"validation_message": {
|
| 1676 |
+
"missing": "Missing supplementary field: quantization_version - GGUF model properties helpful for reproducibility",
|
| 1677 |
+
"recommendation": "Add Quantization Version"
|
| 1678 |
+
},
|
| 1679 |
+
"reference_urls": {
|
| 1680 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1681 |
+
}
|
| 1682 |
+
},
|
| 1683 |
+
"quantization_file_type": {
|
| 1684 |
+
"tier": "supplementary",
|
| 1685 |
+
"weight": 1.0,
|
| 1686 |
+
"category": "component_model_card",
|
| 1687 |
+
"description": "Enum or integer identifier for the quantization bit-precision (e.g. Q4_K_M)",
|
| 1688 |
+
"jsonpath": "$.components[0].modelCard.properties[?(@.name=='genai:aibom:modelcard:quantizationFileType')].value",
|
| 1689 |
+
"aibom_generation": {
|
| 1690 |
+
"location": "$.components[0].properties",
|
| 1691 |
+
"rule": "include_if_available",
|
| 1692 |
+
"source_fields": [
|
| 1693 |
+
"quantization_file_type"
|
| 1694 |
+
],
|
| 1695 |
+
"validation": "optional",
|
| 1696 |
+
"data_type": "integer"
|
| 1697 |
+
},
|
| 1698 |
+
"scoring": {
|
| 1699 |
+
"points": 1.0,
|
| 1700 |
+
"required_for_profiles": [
|
| 1701 |
+
"advanced"
|
| 1702 |
+
],
|
| 1703 |
+
"category_contribution": 0.033
|
| 1704 |
+
},
|
| 1705 |
+
"validation_message": {
|
| 1706 |
+
"missing": "Missing supplementary field: quantization_file_type - GGUF model properties helpful for reproducibility",
|
| 1707 |
+
"recommendation": "Add Quantization File Type"
|
| 1708 |
+
},
|
| 1709 |
+
"reference_urls": {
|
| 1710 |
+
"genai_aibom_taxonomy": "https://github.com/GenAI-Security-Project/cyclonedx-property-taxonomy"
|
| 1711 |
+
}
|
| 1712 |
+
}
|
| 1713 |
+
}
|
| 1714 |
+
}
|
src/models/gguf_metadata.py
ADDED
|
@@ -0,0 +1,528 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
GGUF Metadata Extraction for AIBOM Generator
|
| 3 |
+
|
| 4 |
+
This module extracts metadata from GGUF files without downloading the full file.
|
| 5 |
+
It uses HTTP range requests to fetch only the header portion (typically 2-8MB)
|
| 6 |
+
of potentially multi-GB model files.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import struct
|
| 10 |
+
import logging
|
| 11 |
+
from typing import Dict, Any, Optional, List, OrderedDict
|
| 12 |
+
from collections import OrderedDict as OrderedDictType
|
| 13 |
+
from urllib.parse import quote
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
# GGUF magic number: the ASCII bytes b"GGUF" read as a little-endian uint32
# ('G'=0x47, 'U'=0x55, 'F'=0x46 -> 0x46554747).
GGUF_MAGIC = 0x46554747

# Pre-compiled little-endian struct codecs for every fixed-width scalar type
# that appears in a GGUF header.  Compiling them once at import time avoids
# re-parsing the format string on every read.
_STRUCT_UINT8 = struct.Struct("<B")
_STRUCT_INT8 = struct.Struct("<b")
_STRUCT_UINT16 = struct.Struct("<H")
_STRUCT_INT16 = struct.Struct("<h")
_STRUCT_UINT32 = struct.Struct("<I")
_STRUCT_INT32 = struct.Struct("<i")
_STRUCT_UINT64 = struct.Struct("<Q")
_STRUCT_INT64 = struct.Struct("<q")
_STRUCT_FLOAT32 = struct.Struct("<f")
_STRUCT_FLOAT64 = struct.Struct("<d")
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class GGUFParseError(Exception):
    """Base class for every error raised while parsing GGUF data."""
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class BufferUnderrunError(GGUFParseError):
    """Signals that the buffer ended before a complete value could be read.

    ``required_bytes``, when set, is the total buffer length (in bytes) that
    would have been sufficient for the failed read — callers use it to size
    the next HTTP range request.
    """

    def __init__(self, message: str = "buffer underrun", *, required_bytes: Optional[int] = None):
        super().__init__(message)
        # May be None when the caller cannot estimate how much data is missing.
        self.required_bytes = required_bytes
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class InvalidMagicError(GGUFParseError):
    """Raised when the data does not begin with the GGUF magic number."""
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class GGUFValueType:
    """GGUF metadata value type codes, as stored in the file's key/value section.

    The numeric values are fixed by the GGUF specification and are compared
    against the raw uint32 type tag read from the file — do not renumber.
    """
    UINT8 = 0
    INT8 = 1
    UINT16 = 2
    INT16 = 3
    UINT32 = 4
    INT32 = 5
    FLOAT32 = 6
    BOOL = 7    # stored as one byte; any non-zero value is True
    STRING = 8  # uint64 length prefix followed by UTF-8 bytes
    ARRAY = 9   # uint32 element type + uint64 count + packed elements
    UINT64 = 10
    INT64 = 11
    FLOAT64 = 12
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class GGUFMetadata:
    """Container for the parsed header of one GGUF file.

    Attributes mirror the constructor arguments: the format ``version``,
    the header's ``tensor_count`` and ``kv_count``, the decoded key/value
    ``metadata`` mapping, the number of header bytes consumed
    (``header_length``) and the originating ``filename``.
    """

    def __init__(
        self,
        version: int,
        tensor_count: int,
        kv_count: int,
        metadata: Dict[str, Any],
        header_length: int,
        filename: str = "",
    ):
        self.version, self.tensor_count, self.kv_count = version, tensor_count, kv_count
        self.metadata = metadata
        self.header_length, self.filename = header_length, filename
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class GGUFModelInfo:
    """AIBOM-relevant model information distilled from GGUF metadata.

    Every field except ``filename`` is optional: it is ``None`` when the
    corresponding key was absent from the file.  ``raw_metadata`` always
    holds the full decoded key/value mapping (empty dict if not supplied).
    """

    def __init__(
        self,
        filename: str,
        architecture: Optional[str] = None,
        name: Optional[str] = None,
        quantization_version: Optional[int] = None,
        file_type: Optional[int] = None,
        tokenizer_model: Optional[str] = None,
        vocab_size: Optional[int] = None,
        context_length: Optional[int] = None,
        embedding_length: Optional[int] = None,
        block_count: Optional[int] = None,
        attention_head_count: Optional[int] = None,
        attention_head_count_kv: Optional[int] = None,
        feed_forward_length: Optional[int] = None,
        rope_dimension_count: Optional[int] = None,
        description: Optional[str] = None,
        license: Optional[str] = None,
        author: Optional[str] = None,
        raw_metadata: Optional[Dict[str, Any]] = None,
    ):
        # Identity / provenance.
        self.filename, self.architecture, self.name = filename, architecture, name
        self.description, self.license, self.author = description, license, author
        # Quantization and tokenizer details.
        self.quantization_version, self.file_type = quantization_version, file_type
        self.tokenizer_model, self.vocab_size = tokenizer_model, vocab_size
        # Architecture hyperparameters.
        self.context_length, self.embedding_length = context_length, embedding_length
        self.block_count = block_count
        self.attention_head_count = attention_head_count
        self.attention_head_count_kv = attention_head_count_kv
        self.feed_forward_length = feed_forward_length
        self.rope_dimension_count = rope_dimension_count
        # Full decoded key/value section for consumers that need everything.
        self.raw_metadata = raw_metadata or {}
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class _ByteReader:
    """Sequential little-endian reader over an in-memory byte buffer.

    Wraps the buffer in a memoryview so slicing in :meth:`read` is zero-copy.
    """

    __slots__ = ("_view", "_offset")

    def __init__(self, buffer: bytes) -> None:
        self._view = memoryview(buffer)
        self._offset = 0

    @property
    def offset(self) -> int:
        """Number of bytes consumed so far."""
        return self._offset

    def _require(self, size: int) -> None:
        # Fail early with the total length needed, so callers can grow the
        # buffer (e.g. via a larger HTTP range request) and retry.
        remaining = len(self._view) - self._offset
        if size > remaining:
            raise BufferUnderrunError(
                f"need {size} bytes at offset {self._offset}, but only {remaining} available",
                required_bytes=self._offset + size,
            )

    def read(self, size: int) -> memoryview:
        """Consume and return the next *size* bytes as a zero-copy view."""
        self._require(size)
        begin = self._offset
        self._offset = begin + size
        return self._view[begin:self._offset]

    def _unpack(self, codec: struct.Struct) -> Any:
        # Shared implementation for every fixed-width scalar reader below.
        return codec.unpack_from(self.read(codec.size))[0]

    def read_uint8(self) -> int:
        return self._unpack(_STRUCT_UINT8)

    def read_int8(self) -> int:
        return self._unpack(_STRUCT_INT8)

    def read_uint16(self) -> int:
        return self._unpack(_STRUCT_UINT16)

    def read_int16(self) -> int:
        return self._unpack(_STRUCT_INT16)

    def read_uint32(self) -> int:
        return self._unpack(_STRUCT_UINT32)

    def read_int32(self) -> int:
        return self._unpack(_STRUCT_INT32)

    def read_uint64(self) -> int:
        return self._unpack(_STRUCT_UINT64)

    def read_int64(self) -> int:
        return self._unpack(_STRUCT_INT64)

    def read_float32(self) -> float:
        return self._unpack(_STRUCT_FLOAT32)

    def read_float64(self) -> float:
        return self._unpack(_STRUCT_FLOAT64)

    def read_bool(self) -> bool:
        # GGUF stores booleans as one byte; any non-zero value is True.
        return self.read_uint8() != 0

    def read_string(self) -> str:
        """Read a uint64-length-prefixed UTF-8 string."""
        length = self.read_uint64()
        if length > 10_000_000:
            # Guard against corrupt/hostile headers claiming huge strings.
            raise GGUFParseError(f"string length {length} exceeds sanity limit")
        return self.read(length).tobytes().decode("utf-8")
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def _read_value(reader: _ByteReader, value_type: int) -> Any:
    """Decode one GGUF metadata value of the given type code from *reader*.

    Arrays recurse per element; unknown type codes raise GGUFParseError.
    """
    scalar_readers = {
        GGUFValueType.UINT8: reader.read_uint8,
        GGUFValueType.INT8: reader.read_int8,
        GGUFValueType.UINT16: reader.read_uint16,
        GGUFValueType.INT16: reader.read_int16,
        GGUFValueType.UINT32: reader.read_uint32,
        GGUFValueType.INT32: reader.read_int32,
        GGUFValueType.UINT64: reader.read_uint64,
        GGUFValueType.INT64: reader.read_int64,
        GGUFValueType.FLOAT32: reader.read_float32,
        GGUFValueType.FLOAT64: reader.read_float64,
        GGUFValueType.BOOL: reader.read_bool,
        GGUFValueType.STRING: reader.read_string,
    }
    read_scalar = scalar_readers.get(value_type)
    if read_scalar is not None:
        return read_scalar()

    if value_type == GGUFValueType.ARRAY:
        # Arrays are homogeneous: one element type tag, then count, then data.
        element_type = reader.read_uint32()
        count = reader.read_uint64()
        if count > 1_000_000:
            # Guard against corrupt headers claiming absurd element counts.
            raise GGUFParseError(f"array count {count} exceeds sanity limit")
        return [_read_value(reader, element_type) for _ in range(count)]

    raise GGUFParseError(f"unknown GGUF value type: {value_type}")
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def parse_gguf_metadata(buffer: bytes, filename: str = "") -> GGUFMetadata:
|
| 232 |
+
"""Parse GGUF metadata from a byte buffer."""
|
| 233 |
+
reader = _ByteReader(buffer)
|
| 234 |
+
|
| 235 |
+
magic = reader.read_uint32()
|
| 236 |
+
if magic != GGUF_MAGIC:
|
| 237 |
+
raise InvalidMagicError(f"invalid magic: 0x{magic:08x}, expected 0x{GGUF_MAGIC:08x}")
|
| 238 |
+
|
| 239 |
+
version = reader.read_uint32()
|
| 240 |
+
tensor_count = reader.read_uint64()
|
| 241 |
+
kv_count = reader.read_uint64()
|
| 242 |
+
|
| 243 |
+
if kv_count > 100_000:
|
| 244 |
+
raise GGUFParseError(f"kv_count {kv_count} exceeds sanity limit")
|
| 245 |
+
|
| 246 |
+
metadata: OrderedDictType[str, Any] = OrderedDictType()
|
| 247 |
+
|
| 248 |
+
for _ in range(kv_count):
|
| 249 |
+
key = reader.read_string()
|
| 250 |
+
value_type = reader.read_uint32()
|
| 251 |
+
value = _read_value(reader, value_type)
|
| 252 |
+
metadata[key] = value
|
| 253 |
+
|
| 254 |
+
return GGUFMetadata(
|
| 255 |
+
version=version,
|
| 256 |
+
tensor_count=tensor_count,
|
| 257 |
+
kv_count=kv_count,
|
| 258 |
+
metadata=metadata,
|
| 259 |
+
header_length=reader.offset,
|
| 260 |
+
filename=filename
|
| 261 |
+
)
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def extract_model_info(gguf_metadata: GGUFMetadata) -> GGUFModelInfo:
    """Project parsed GGUF metadata onto the AIBOM-relevant model fields."""
    meta = gguf_metadata.metadata
    arch = meta.get("general.architecture", "")

    def arch_field(suffix: str) -> Optional[Any]:
        # Architecture-scoped keys are stored as "<arch>.<suffix>"; with no
        # known architecture there is nothing to look up.
        return meta.get(f"{arch}.{suffix}") if arch else None

    return GGUFModelInfo(
        filename=gguf_metadata.filename,
        architecture=arch or None,
        name=meta.get("general.name"),
        quantization_version=meta.get("general.quantization_version"),
        file_type=meta.get("general.file_type"),
        tokenizer_model=meta.get("tokenizer.ggml.model"),
        # An empty/absent token list maps to None rather than 0.
        vocab_size=len(meta.get("tokenizer.ggml.tokens", [])) or None,
        context_length=arch_field("context_length"),
        embedding_length=arch_field("embedding_length"),
        block_count=arch_field("block_count"),
        attention_head_count=arch_field("attention.head_count"),
        attention_head_count_kv=arch_field("attention.head_count_kv"),
        feed_forward_length=arch_field("feed_forward_length"),
        rope_dimension_count=arch_field("rope.dimension_count"),
        description=meta.get("general.description"),
        license=meta.get("general.license"),
        author=meta.get("general.author"),
        raw_metadata=dict(meta),
    )
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def build_huggingface_url(repo_id: str, filename: str, revision: str = "main") -> str:
|
| 299 |
+
"""Build a HuggingFace download URL for a file."""
|
| 300 |
+
if not repo_id or "/" not in repo_id:
|
| 301 |
+
raise ValueError("repo_id must be in format 'owner/repo'")
|
| 302 |
+
|
| 303 |
+
owner, repo = repo_id.split("/", 1)
|
| 304 |
+
owner_quoted = quote(owner, safe="-_.~")
|
| 305 |
+
repo_quoted = quote(repo, safe="-_.~")
|
| 306 |
+
revision_quoted = quote(revision, safe="-_.~")
|
| 307 |
+
filename_quoted = "/".join(quote(part, safe="-_.~/") for part in filename.split("/"))
|
| 308 |
+
|
| 309 |
+
return f"https://huggingface.co/{owner_quoted}/{repo_quoted}/resolve/{revision_quoted}/{filename_quoted}"
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def fetch_gguf_metadata_from_url(
    url: str,
    filename: str = "",
    *,
    hf_token: Optional[str] = None,
    initial_request_size: int = 8 * 1024 * 1024,
    max_request_size: int = 64 * 1024 * 1024,
    timeout: float = 60.0,
) -> GGUFMetadata:
    """Fetch and parse GGUF metadata from a URL using HTTP range requests.

    Only the header portion of the (potentially multi-GB) file is downloaded:
    an initial ranged GET of ``initial_request_size`` bytes, then progressively
    larger ranges whenever parsing reports a buffer underrun, capped at
    ``max_request_size`` total bytes and 5 retries.

    Args:
        url: Direct download URL of the .gguf file.
        filename: Name recorded on the returned GGUFMetadata.
        hf_token: Optional bearer token added to the request headers.
        initial_request_size: Bytes requested by the first range request.
        max_request_size: Hard cap on the total bytes buffered.
        timeout: Per-request timeout in seconds.

    Returns:
        Parsed GGUFMetadata for the file's header/metadata section.

    Raises:
        ImportError: if httpx is not installed.
        InvalidMagicError / GGUFParseError / BufferUnderrunError: for
            malformed data or data that cannot be parsed within the cap.
        httpx.HTTPStatusError: if the server rejects a request.
    """
    try:
        import httpx
    except ImportError:
        raise ImportError("httpx is required for remote GGUF fetching. Install with: pip install httpx")

    headers = {
        "User-Agent": "OWASP-AIBOM-Generator/1.0",
        "Accept": "application/octet-stream",
    }
    if hf_token:
        headers["Authorization"] = f"Bearer {hf_token}"

    # Resolve up to 5 redirects manually with HEAD so the ranged GETs below
    # go straight to the final location.
    # NOTE(review): the full header set (including Authorization) is re-sent
    # to each redirect target — confirm this is intended for cross-host CDN
    # redirects.  Servers that reject HEAD will simply leave the original URL
    # in place, which the follow_redirects=True client below still handles.
    with httpx.Client(timeout=timeout, follow_redirects=False) as client:
        current_url = url
        for _ in range(5):
            response = client.head(current_url, headers=headers)
            if response.status_code in (301, 302, 303, 307, 308):
                current_url = response.headers.get("location", current_url)
                logger.debug(f"Redirecting to: {current_url}")
            else:
                break
        actual_url = current_url

    buffer = bytearray()
    request_size = initial_request_size

    with httpx.Client(timeout=timeout, follow_redirects=True) as client:
        # First ranged request: bytes [0, request_size).
        range_header = f"bytes=0-{request_size - 1}"
        request_headers = {**headers, "Range": range_header}

        logger.info(f"Fetching first {request_size // (1024*1024)}MB of GGUF metadata...")
        response = client.get(actual_url, headers=request_headers)
        response.raise_for_status()
        buffer.extend(response.content)

        # Parse; on underrun, grow the buffer with further range requests.
        max_retries = 5
        for retry in range(max_retries):
            try:
                return parse_gguf_metadata(bytes(buffer), filename)
            except BufferUnderrunError as exc:
                if retry >= max_retries - 1:
                    raise

                # Target size: what the parser said it needs plus 2MB slack,
                # but at least double the current buffer to limit round trips.
                if exc.required_bytes:
                    needed = max(exc.required_bytes + 2 * 1024 * 1024, len(buffer) * 2)
                else:
                    needed = len(buffer) * 2

                additional_size = min(needed - len(buffer), max_request_size - len(buffer))

                if additional_size <= 0 or len(buffer) >= max_request_size:
                    raise GGUFParseError(f"unable to parse metadata within {max_request_size} bytes")

                logger.info(f"Need more data (retry {retry + 1}), fetching additional {additional_size // 1024}KB...")

                # Request only the bytes we do not have yet, appended in order.
                range_header = f"bytes={len(buffer)}-{len(buffer) + additional_size - 1}"
                request_headers = {**headers, "Range": range_header}
                response = client.get(actual_url, headers=request_headers)
                response.raise_for_status()
                buffer.extend(response.content)
                logger.info(f"Buffer now {len(buffer) // 1024}KB")
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def fetch_gguf_metadata_from_repo(
    repo_id: str,
    filename: str,
    *,
    revision: str = "main",
    hf_token: Optional[str] = None,
    **kwargs
) -> GGUFModelInfo:
    """Fetch and extract AIBOM-relevant metadata from a GGUF file in a HuggingFace repo.

    Args:
        repo_id: Repository in "owner/repo" form.
        filename: Path of the .gguf file within the repository.
        revision: Git revision (branch, tag or commit) to resolve against.
        hf_token: Optional HuggingFace access token for gated/private repos.
        **kwargs: Forwarded to fetch_gguf_metadata_from_url (e.g. timeout,
            initial_request_size).

    Returns:
        GGUFModelInfo with the fields relevant for AIBOM generation.

    Raises:
        ValueError: if repo_id is malformed.
        GGUFParseError (and subclasses): if the file cannot be parsed.
    """
    url = build_huggingface_url(repo_id, filename, revision)
    # Fix: include the actual file path in the log message (it previously
    # printed a literal "(unknown)" placeholder).
    logger.info(f"Fetching GGUF metadata from {repo_id}/{filename}")

    gguf_metadata = fetch_gguf_metadata_from_url(
        url,
        filename=filename,
        hf_token=hf_token,
        **kwargs
    )

    return extract_model_info(gguf_metadata)
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def list_gguf_files(repo_id: str, hf_token: Optional[str] = None) -> List[str]:
    """Return the paths of all .gguf files in a HuggingFace repository."""
    from huggingface_hub import list_repo_files

    repo_files = list_repo_files(repo_id, token=hf_token)
    return [name for name in repo_files if name.endswith('.gguf')]
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def extract_all_gguf_metadata(
    repo_id: str,
    *,
    hf_token: Optional[str] = None,
    **kwargs
) -> List[GGUFModelInfo]:
    """Extract metadata from all GGUF files in a repository.

    Extraction is best-effort: a file that fails to download or parse is
    skipped with a warning instead of aborting the remaining files.

    Args:
        repo_id: Repository in "owner/repo" form.
        hf_token: Optional HuggingFace access token.
        **kwargs: Forwarded to fetch_gguf_metadata_from_repo.

    Returns:
        One GGUFModelInfo per successfully parsed GGUF file (possibly empty).
    """
    gguf_files = list_gguf_files(repo_id, hf_token)

    if not gguf_files:
        logger.debug(f"No GGUF files found in {repo_id}")
        return []

    logger.info(f"Found {len(gguf_files)} GGUF files in {repo_id}")

    results: List[GGUFModelInfo] = []
    for filename in gguf_files:
        try:
            info = fetch_gguf_metadata_from_repo(
                repo_id,
                filename,
                hf_token=hf_token,
                **kwargs
            )
        except Exception as e:
            # Fix: log the actual filename (these messages previously printed
            # a literal "(unknown)" placeholder).
            logger.warning(f"  {filename}: failed to extract metadata: {e}")
        else:
            results.append(info)
            logger.info(f"  {filename}: architecture={info.architecture}")

    return results
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
def _map_core_fields(gguf_info: GGUFModelInfo) -> Dict[str, Any]:
    """Map basic model identity and tokenizer fields into AIBOM metadata keys."""
    mapped: Dict[str, Any] = {}

    arch = gguf_info.architecture
    if arch:
        # The architecture doubles as both the raw model_type and the
        # AIBOM-facing typeOfModel field.
        mapped["model_type"] = arch
        mapped["typeOfModel"] = arch

    optional_fields = (
        ("name", gguf_info.name),
        ("tokenizer_class", gguf_info.tokenizer_model),
        ("vocab_size", gguf_info.vocab_size),
        ("context_length", gguf_info.context_length),
    )
    for key, value in optional_fields:
        if value:
            mapped[key] = value

    # Always record which file the metadata came from.
    mapped["gguf_filename"] = gguf_info.filename
    return mapped
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
def _map_supplementary_fields(gguf_info: GGUFModelInfo) -> Dict[str, Any]:
    """Map optional descriptive fields (description, author, license)."""
    candidates = (
        ("description", gguf_info.description),
        ("suppliedBy", gguf_info.author),
        ("gguf_license", gguf_info.license),
    )
    # Only emit keys whose source value is actually present.
    return {key: value for key, value in candidates if value}
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
def _map_quantization(gguf_info: GGUFModelInfo) -> Dict[str, Any]:
|
| 489 |
+
"""Map quantization metadata."""
|
| 490 |
+
quantization = {}
|
| 491 |
+
|
| 492 |
+
if gguf_info.quantization_version:
|
| 493 |
+
quantization["version"] = gguf_info.quantization_version
|
| 494 |
+
if gguf_info.file_type:
|
| 495 |
+
quantization["file_type"] = gguf_info.file_type
|
| 496 |
+
|
| 497 |
+
return {"quantization": quantization} if quantization else {}
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
def _map_hyperparameters(gguf_info: GGUFModelInfo) -> Dict[str, Any]:
|
| 501 |
+
"""Map inference-shape hyperparameters."""
|
| 502 |
+
hyperparams = {}
|
| 503 |
+
|
| 504 |
+
if gguf_info.context_length:
|
| 505 |
+
hyperparams["context_length"] = gguf_info.context_length
|
| 506 |
+
if gguf_info.embedding_length:
|
| 507 |
+
hyperparams["embedding_length"] = gguf_info.embedding_length
|
| 508 |
+
if gguf_info.block_count:
|
| 509 |
+
hyperparams["block_count"] = gguf_info.block_count
|
| 510 |
+
if gguf_info.attention_head_count:
|
| 511 |
+
hyperparams["attention_head_count"] = gguf_info.attention_head_count
|
| 512 |
+
if gguf_info.attention_head_count_kv:
|
| 513 |
+
hyperparams["attention_head_count_kv"] = gguf_info.attention_head_count_kv
|
| 514 |
+
if gguf_info.feed_forward_length:
|
| 515 |
+
hyperparams["feed_forward_length"] = gguf_info.feed_forward_length
|
| 516 |
+
if gguf_info.rope_dimension_count:
|
| 517 |
+
hyperparams["rope_dimension_count"] = gguf_info.rope_dimension_count
|
| 518 |
+
|
| 519 |
+
return {"hyperparameter": hyperparams} if hyperparams else {}
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
def map_to_metadata(gguf_info: GGUFModelInfo) -> Dict[str, Any]:
    """Merge all GGUF field mappings into a single flat AIBOM metadata dict."""
    merged: Dict[str, Any] = {}
    for section in (
        _map_core_fields(gguf_info),
        _map_supplementary_fields(gguf_info),
        _map_quantization(gguf_info),
        _map_hyperparameters(gguf_info),
    ):
        merged.update(section)
    # TODO: add chat template field mapping
    return merged
|
src/models/model_file_extractors.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import Protocol, Dict, Any, List, runtime_checkable
|
| 3 |
+
|
| 4 |
+
from .gguf_metadata import fetch_gguf_metadata_from_repo, map_to_metadata
|
| 5 |
+
|
| 6 |
+
logger = logging.getLogger(__name__)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@runtime_checkable
class ModelFileExtractor(Protocol):
    """Structural interface for extractors that read metadata from model files.

    Implementations report whether they can handle a given model id and, if
    so, return a flat metadata dict (empty on failure).
    """
    def can_extract(self, model_id: str) -> bool: ...
    def extract_metadata(self, model_id: str) -> Dict[str, Any]: ...
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class GGUFFileExtractor:
    """ModelFileExtractor implementation for repositories containing GGUF files."""

    def can_extract(self, model_id: str) -> bool:
        """Return True if the HuggingFace repo lists at least one .gguf file."""
        try:
            from huggingface_hub import list_repo_files
            # Any failure (network, gated/missing repo) means "not handled".
            return any(name.endswith(".gguf") for name in list_repo_files(model_id))
        except Exception:
            return False

    def extract_metadata(self, model_id: str) -> Dict[str, Any]:
        """Extract AIBOM metadata from the first GGUF file in the repo.

        Returns an empty dict when the repo has no GGUF files or when any
        step of the extraction fails.
        """
        from huggingface_hub import list_repo_files

        try:
            gguf_files = [name for name in list_repo_files(model_id) if name.endswith(".gguf")]
            if not gguf_files:
                return {}

            # Only the first GGUF file is inspected.  NOTE(review): this
            # assumes quantization variants share architecture-level
            # metadata — confirm for multi-variant repos.
            model_info = fetch_gguf_metadata_from_repo(model_id, gguf_files[0])
            if model_info is None:
                return {}

            return map_to_metadata(model_info)
        except Exception as e:
            logger.warning(f"GGUF extraction failed for {model_id}: {e}")
            return {}
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def default_extractors() -> List[ModelFileExtractor]:
    """Return the built-in extractor chain (currently GGUF only)."""
    extractors: List[ModelFileExtractor] = [GGUFFileExtractor()]
    return extractors
|
src/models/registry.py
ADDED
|
@@ -0,0 +1,535 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Field Registry Manager for AI SBOM Generator
|
| 3 |
+
Combines registry loading, configuration generation, and field detection functionality
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import os
|
| 8 |
+
import re
|
| 9 |
+
import logging
|
| 10 |
+
from typing import Dict, Any, Optional, List, Tuple
|
| 11 |
+
from functools import lru_cache
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
class FieldRegistryManager:
|
| 16 |
+
"""
|
| 17 |
+
Field registry manager that handles:
|
| 18 |
+
1. Registry loading and validation
|
| 19 |
+
2. Configuration generation for utils.py compatibility
|
| 20 |
+
3. Field detection and JSONPath parsing
|
| 21 |
+
4. AIBOM completeness analysis
|
| 22 |
+
5. Scoring calculations
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
def __init__(self, registry_path: Optional[str] = None):
|
| 26 |
+
"""
|
| 27 |
+
Initialize the field registry manager
|
| 28 |
+
|
| 29 |
+
Args:
|
| 30 |
+
registry_path: Path to the field registry JSON file. If None, auto-detects.
|
| 31 |
+
"""
|
| 32 |
+
if registry_path is None:
|
| 33 |
+
# Auto-detect registry path relative to this file
|
| 34 |
+
current_dir = os.path.dirname(os.path.abspath(__file__))
|
| 35 |
+
registry_path = os.path.join(current_dir, "field_registry.json")
|
| 36 |
+
|
| 37 |
+
self.registry_path = registry_path
|
| 38 |
+
self.registry = self._load_registry()
|
| 39 |
+
|
| 40 |
+
# Cache for performance
|
| 41 |
+
self._field_classification = None
|
| 42 |
+
self._completeness_profiles = None
|
| 43 |
+
self._validation_messages = None
|
| 44 |
+
self._scoring_weights = None
|
| 45 |
+
|
| 46 |
+
def _load_registry(self) -> Dict[str, Any]:
|
| 47 |
+
"""Load the complete field registry from JSON file"""
|
| 48 |
+
try:
|
| 49 |
+
with open(self.registry_path, 'r', encoding='utf-8') as f:
|
| 50 |
+
registry = json.load(f)
|
| 51 |
+
|
| 52 |
+
# Validate basic structure
|
| 53 |
+
required_sections = ["fields"]
|
| 54 |
+
missing_sections = [section for section in required_sections if section not in registry]
|
| 55 |
+
|
| 56 |
+
if missing_sections:
|
| 57 |
+
raise ValueError(f"Registry missing required sections: {missing_sections}")
|
| 58 |
+
|
| 59 |
+
# Validate fields structure
|
| 60 |
+
fields = registry.get('fields', {})
|
| 61 |
+
if not fields:
|
| 62 |
+
raise ValueError("Registry 'fields' section is empty")
|
| 63 |
+
|
| 64 |
+
logger.info(f"✅ Field registry loaded: {len(fields)} fields from {self.registry_path}")
|
| 65 |
+
return registry
|
| 66 |
+
|
| 67 |
+
except FileNotFoundError:
|
| 68 |
+
raise FileNotFoundError(f"Field registry not found at: {self.registry_path}")
|
| 69 |
+
except json.JSONDecodeError as e:
|
| 70 |
+
raise ValueError(f"Invalid JSON in field registry: {e}")
|
| 71 |
+
except Exception as e:
|
| 72 |
+
raise Exception(f"Failed to load field registry: {e}")
|
| 73 |
+
|
| 74 |
+
# =============================================================================
|
| 75 |
+
# CONFIGURATION GENERATION
|
| 76 |
+
# =============================================================================
|
| 77 |
+
|
| 78 |
+
@lru_cache(maxsize=1)
|
| 79 |
+
def get_scoring_config(self) -> Dict[str, Any]:
|
| 80 |
+
"""Get scoring configuration from registry"""
|
| 81 |
+
return self.registry.get('scoring_config', {})
|
| 82 |
+
|
| 83 |
+
@lru_cache(maxsize=1)
|
| 84 |
+
def get_aibom_config(self) -> Dict[str, Any]:
|
| 85 |
+
"""Get AIBOM generation configuration from registry"""
|
| 86 |
+
return self.registry.get('aibom_config', {})
|
| 87 |
+
|
| 88 |
+
@lru_cache(maxsize=1)
|
| 89 |
+
def get_field_definitions(self) -> Dict[str, Any]:
|
| 90 |
+
"""Get all field definitions from registry"""
|
| 91 |
+
return self.registry.get('fields', {})
|
| 92 |
+
|
| 93 |
+
def generate_field_classification(self) -> Dict[str, Any]:
|
| 94 |
+
"""
|
| 95 |
+
Generate FIELD_CLASSIFICATION dictionary from registry
|
| 96 |
+
"""
|
| 97 |
+
if self._field_classification is not None:
|
| 98 |
+
return self._field_classification
|
| 99 |
+
|
| 100 |
+
fields = self.get_field_definitions()
|
| 101 |
+
classification = {}
|
| 102 |
+
|
| 103 |
+
for field_name, field_config in fields.items():
|
| 104 |
+
jsonpath = field_config.get("jsonpath", "")
|
| 105 |
+
param_type = "AITX" if "properties[" in jsonpath else "CDX"
|
| 106 |
+
missing_msg = field_config.get("validation_message", {}).get("missing", "")
|
| 107 |
+
is_gguf = "GGUF" in missing_msg
|
| 108 |
+
|
| 109 |
+
classification[field_name] = {
|
| 110 |
+
"tier": field_config.get("tier", "supplementary"),
|
| 111 |
+
"weight": field_config.get("weight", 1),
|
| 112 |
+
"category": field_config.get("category", "unknown"),
|
| 113 |
+
"parameter_type": param_type,
|
| 114 |
+
"reference_urls": field_config.get("reference_urls", {}),
|
| 115 |
+
"jsonpath": jsonpath,
|
| 116 |
+
"is_gguf": is_gguf
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
self._field_classification = classification
|
| 120 |
+
return classification
|
| 121 |
+
|
| 122 |
+
def generate_completeness_profiles(self) -> Dict[str, Any]:
|
| 123 |
+
"""
|
| 124 |
+
Generate COMPLETENESS_PROFILES dictionary from registry
|
| 125 |
+
"""
|
| 126 |
+
if self._completeness_profiles is not None:
|
| 127 |
+
return self._completeness_profiles
|
| 128 |
+
|
| 129 |
+
scoring_config = self.get_scoring_config()
|
| 130 |
+
profiles = scoring_config.get("scoring_profiles", {})
|
| 131 |
+
|
| 132 |
+
# Convert to utils.py format
|
| 133 |
+
completeness_profiles = {}
|
| 134 |
+
for profile_name, profile_config in profiles.items():
|
| 135 |
+
completeness_profiles[profile_name] = {
|
| 136 |
+
"description": profile_config.get("description", f"{profile_name.title()} completeness profile"),
|
| 137 |
+
"required_fields": profile_config.get("required_fields", []),
|
| 138 |
+
"minimum_score": profile_config.get("minimum_score", 50)
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
# Fallback profiles if none defined in registry
|
| 142 |
+
if not completeness_profiles:
|
| 143 |
+
completeness_profiles = {
|
| 144 |
+
"basic": {
|
| 145 |
+
"description": "Minimal fields required for identification",
|
| 146 |
+
"required_fields": ["bomFormat", "specVersion", "serialNumber", "version", "name"],
|
| 147 |
+
"minimum_score": 40
|
| 148 |
+
},
|
| 149 |
+
"standard": {
|
| 150 |
+
"description": "Comprehensive fields for proper documentation",
|
| 151 |
+
"required_fields": ["bomFormat", "specVersion", "serialNumber", "version", "name",
|
| 152 |
+
"downloadLocation", "primaryPurpose", "suppliedBy"],
|
| 153 |
+
"minimum_score": 70
|
| 154 |
+
},
|
| 155 |
+
"advanced": {
|
| 156 |
+
"description": "Extensive documentation for maximum transparency",
|
| 157 |
+
"required_fields": ["bomFormat", "specVersion", "serialNumber", "version", "name",
|
| 158 |
+
"downloadLocation", "primaryPurpose", "suppliedBy",
|
| 159 |
+
"type", "purl", "description", "licenses", "hyperparameter", "technicalLimitations",
|
| 160 |
+
"energyConsumption", "safetyRiskAssessment", "typeOfModel"],
|
| 161 |
+
"minimum_score": 85
|
| 162 |
+
}
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
self._completeness_profiles = completeness_profiles
|
| 166 |
+
return completeness_profiles
|
| 167 |
+
|
| 168 |
+
def generate_validation_messages(self) -> Dict[str, Any]:
|
| 169 |
+
"""
|
| 170 |
+
Generate VALIDATION_MESSAGES dictionary from registry
|
| 171 |
+
"""
|
| 172 |
+
if self._validation_messages is not None:
|
| 173 |
+
return self._validation_messages
|
| 174 |
+
|
| 175 |
+
fields = self.get_field_definitions()
|
| 176 |
+
validation_messages = {}
|
| 177 |
+
|
| 178 |
+
for field_name, field_config in fields.items():
|
| 179 |
+
validation_msg = field_config.get("validation_message", {})
|
| 180 |
+
if validation_msg:
|
| 181 |
+
validation_messages[field_name] = {
|
| 182 |
+
"missing": validation_msg.get("missing", f"Missing field: {field_name}"),
|
| 183 |
+
"recommendation": validation_msg.get("recommendation", f"Consider adding {field_name} field")
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
self._validation_messages = validation_messages
|
| 187 |
+
return validation_messages
|
| 188 |
+
|
| 189 |
+
def get_configurable_scoring_weights(self) -> Dict[str, Any]:
|
| 190 |
+
"""Get configurable scoring weights from registry"""
|
| 191 |
+
if self._scoring_weights is not None:
|
| 192 |
+
return self._scoring_weights
|
| 193 |
+
|
| 194 |
+
scoring_config = self.get_scoring_config()
|
| 195 |
+
|
| 196 |
+
weights = {
|
| 197 |
+
"tier_weights": scoring_config.get("tier_weights", {
|
| 198 |
+
"critical": 3,
|
| 199 |
+
"important": 2,
|
| 200 |
+
"supplementary": 1
|
| 201 |
+
}),
|
| 202 |
+
"category_weights": scoring_config.get("category_weights", {
|
| 203 |
+
"required_fields": 20,
|
| 204 |
+
"metadata": 20,
|
| 205 |
+
"component_basic": 20,
|
| 206 |
+
"component_model_card": 30,
|
| 207 |
+
"external_references": 10
|
| 208 |
+
}),
|
| 209 |
+
"algorithm_config": scoring_config.get("algorithm_config", {
|
| 210 |
+
"type": "weighted_sum",
|
| 211 |
+
"max_score": 100,
|
| 212 |
+
"normalization": "category_based"
|
| 213 |
+
})
|
| 214 |
+
}
|
| 215 |
+
|
| 216 |
+
self._scoring_weights = weights
|
| 217 |
+
return weights
|
| 218 |
+
|
| 219 |
+
# =============================================================================
|
| 220 |
+
# FIELD DETECTION
|
| 221 |
+
# =============================================================================
|
| 222 |
+
|
| 223 |
+
    def _get_nested_value(self, data: dict, path: str) -> Tuple[bool, Any]:
        """
        Get value from nested dictionary using dot notation and array filters
        Supports paths like: $.components[0].name, $.metadata.properties[?(@.name=='primaryPurpose')].value

        Returns:
            (found, value) — (False, None) when the path does not resolve,
            resolves to None/""/[], or raises during traversal.
        """
        try:
            # Remove leading $. if present
            if path.startswith('$.'):
                path = path[2:]

            # Handle special JSONPath-like syntax for property/array filtering
            # Supports [?(@.field=='value')] — delegated to the filter handler.
            if '[?(@.' in path:
                return self._handle_property_array_path(data, path)

            # Split path and traverse
            parts = self._split_path(path)
            current = data

            for part in parts:
                if '[' in part and ']' in part:
                    # Handle array access like components[0]
                    # NOTE(review): split('[') assumes a single bracket pair
                    # per segment — multi-index segments (a[0][1]) would break.
                    key, index_str = part.split('[')
                    index = int(index_str.rstrip(']'))

                    # A bare "[0]" segment (empty key) indexes `current` directly.
                    if key and key in current:
                        current = current[key]

                    if isinstance(current, list) and 0 <= index < len(current):
                        current = current[index]
                    else:
                        return False, None
                else:
                    # Regular key access
                    if isinstance(current, dict) and part in current:
                        current = current[part]
                    else:
                        return False, None

            # Check if value is meaningful: None, "" and [] all count as absent.
            # NOTE(review): 0 and False compare unequal to "" and [], so they
            # are treated as present.
            if current is not None and current != "" and current != []:
                return True, current

            return False, None

        except Exception as e:
            logger.error(f"Error getting value at path {path}: {e}")
            return False, None
|
| 271 |
+
|
| 272 |
+
def _handle_property_array_path(self, data: dict, path: str) -> Tuple[bool, Any]:
|
| 273 |
+
"""
|
| 274 |
+
Handle generic JSONPath-like syntax for array filtering
|
| 275 |
+
Supports: base_path[?(@.key=='value')].optional_suffix
|
| 276 |
+
Example: metadata.component.externalReferences[?(@.type=='documentation')]
|
| 277 |
+
Example: metadata.properties[?(@.name=='primaryPurpose')].value
|
| 278 |
+
"""
|
| 279 |
+
try:
|
| 280 |
+
# Regex to capture: Base Path, Filter Key, Filter Value, Optional Suffix
|
| 281 |
+
# matches: something[?(@.key=='val')] or something[?(@.key=='val')].sub
|
| 282 |
+
pattern = r'(.+)\[\?\(@\.(\w+)==\'([^\']+)\'\)\](.*)'
|
| 283 |
+
match = re.search(pattern, path)
|
| 284 |
+
|
| 285 |
+
if not match:
|
| 286 |
+
return False, None
|
| 287 |
+
|
| 288 |
+
base_path, filter_key, filter_val, suffix = match.groups()
|
| 289 |
+
|
| 290 |
+
# Get the list at base_path
|
| 291 |
+
base_found, base_list = self._get_nested_value(data, base_path)
|
| 292 |
+
if not base_found or not isinstance(base_list, list):
|
| 293 |
+
return False, None
|
| 294 |
+
|
| 295 |
+
# Find matching item
|
| 296 |
+
found_item = None
|
| 297 |
+
for item in base_list:
|
| 298 |
+
if isinstance(item, dict) and str(item.get(filter_key)) == filter_val:
|
| 299 |
+
found_item = item
|
| 300 |
+
break
|
| 301 |
+
|
| 302 |
+
if found_item is None:
|
| 303 |
+
return False, None
|
| 304 |
+
|
| 305 |
+
# If there's a suffix (e.g., .value), traverse it
|
| 306 |
+
if suffix:
|
| 307 |
+
if suffix.startswith('.'):
|
| 308 |
+
suffix = suffix[1:]
|
| 309 |
+
return self._get_nested_value(found_item, suffix)
|
| 310 |
+
|
| 311 |
+
# No suffix, return the item itself
|
| 312 |
+
return True, found_item
|
| 313 |
+
|
| 314 |
+
except Exception as e:
|
| 315 |
+
logger.error(f"Error handling array path {path}: {e}")
|
| 316 |
+
return False, None
|
| 317 |
+
|
| 318 |
+
except Exception as e:
|
| 319 |
+
logger.error(f"Error handling property array path {path}: {e}")
|
| 320 |
+
return False, None
|
| 321 |
+
|
| 322 |
+
def _split_path(self, path: str) -> List[str]:
|
| 323 |
+
"""Split path into parts, handling array notation"""
|
| 324 |
+
parts = []
|
| 325 |
+
current_part = ""
|
| 326 |
+
in_brackets = False
|
| 327 |
+
|
| 328 |
+
for char in path:
|
| 329 |
+
if char == '[':
|
| 330 |
+
in_brackets = True
|
| 331 |
+
current_part += char
|
| 332 |
+
elif char == ']':
|
| 333 |
+
in_brackets = False
|
| 334 |
+
current_part += char
|
| 335 |
+
elif char == '.' and not in_brackets:
|
| 336 |
+
if current_part:
|
| 337 |
+
parts.append(current_part)
|
| 338 |
+
current_part = ""
|
| 339 |
+
else:
|
| 340 |
+
current_part += char
|
| 341 |
+
|
| 342 |
+
if current_part:
|
| 343 |
+
parts.append(current_part)
|
| 344 |
+
|
| 345 |
+
return parts
|
| 346 |
+
|
| 347 |
+
    def detect_field_presence(self, aibom: dict, field_path: str) -> Tuple[bool, Any]:
        """
        Detect if a field exists at the given path in the AIBOM
        Returns: (field_exists, field_value)
        """
        # Thin public wrapper: all path parsing and traversal (including the
        # [?(@...)]-filter syntax) lives in _get_nested_value.
        return self._get_nested_value(aibom, field_path)
|
| 353 |
+
|
| 354 |
+
    def analyze_aibom_completeness(self, aibom: dict) -> Dict[str, Any]:
        """
        Analyze AIBOM completeness against the enhanced field registry
        Compatible with enhanced registry structure: registry['fields'][field_name]

        Returns a dict with:
            category_scores: per-category weighted score
            total_score: sum of all category scores
            field_details: per-category, per-field presence info
            summary: per-category present/total/percentage/weight
        """
        results = {
            'category_scores': {},
            'total_score': 0,
            'field_details': {},
            'summary': {}
        }

        # Get fields from enhanced registry structure
        fields = self.get_field_definitions()
        if not fields:
            logger.warning("❌ No fields found in registry")
            return results

        # Get scoring configuration
        scoring_weights = self.get_configurable_scoring_weights()
        category_weights = scoring_weights.get('category_weights', {})

        # Group fields by category
        categories = {}
        for field_name, field_config in fields.items():
            category = field_config.get('category', 'unknown')
            if category not in categories:
                categories[category] = []
            categories[category].append((field_name, field_config))

        logger.info(f"🔍 Analyzing {len(fields)} fields across {len(categories)} categories")

        total_weighted_score = 0

        for category_name, category_fields in categories.items():
            # Categories absent from the config default to weight 20.
            category_weight = category_weights.get(category_name, 20)

            present_fields = 0
            total_fields = len(category_fields)
            field_details = {}

            for field_name, field_config in category_fields:
                field_path = field_config.get('jsonpath', '')
                tier = field_config.get('tier', 'supplementary')
                weight = field_config.get('weight', 1)

                # A field without a jsonpath can never be detected; record it
                # as absent with an explicit error and move on.
                if not field_path:
                    field_details[field_name] = {
                        'present': False,
                        'value': None,
                        'path': field_path,
                        'tier': tier,
                        'weight': weight,
                        'error': 'No jsonpath defined'
                    }
                    continue

                is_present, value = self.detect_field_presence(aibom, field_path)

                field_details[field_name] = {
                    'present': is_present,
                    'value': value,
                    'path': field_path,
                    'tier': tier,
                    'weight': weight
                }

                if is_present:
                    present_fields += 1

            # Calculate category score: fraction present scaled by the
            # category's configured weight. NOTE(review): per-field `weight`
            # is recorded but not used in the score — each field counts
            # equally within its category.
            category_percentage = (present_fields / total_fields) * 100 if total_fields > 0 else 0
            category_score = (category_percentage / 100) * category_weight

            results['category_scores'][category_name] = category_score
            results['field_details'][category_name] = field_details
            results['summary'][category_name] = {
                'present': present_fields,
                'total': total_fields,
                'percentage': category_percentage,
                'weight': category_weight
            }

            total_weighted_score += category_score

        results['total_score'] = total_weighted_score

        return results
|
| 442 |
+
|
| 443 |
+
# =============================================================================
|
| 444 |
+
# UTILITY METHODS
|
| 445 |
+
# =============================================================================
|
| 446 |
+
|
| 447 |
+
def get_field_info(self, field_name: str) -> Optional[Dict[str, Any]]:
|
| 448 |
+
"""Get complete information for a specific field"""
|
| 449 |
+
fields = self.get_field_definitions()
|
| 450 |
+
return fields.get(field_name)
|
| 451 |
+
|
| 452 |
+
def get_field_jsonpath(self, field_name: str) -> Optional[str]:
|
| 453 |
+
"""Get JSONPath expression for a specific field"""
|
| 454 |
+
field_info = self.get_field_info(field_name)
|
| 455 |
+
return field_info.get("jsonpath") if field_info else None
|
| 456 |
+
|
| 457 |
+
def get_fields_by_category(self, category: str) -> List[str]:
|
| 458 |
+
"""Get all field names in a specific category"""
|
| 459 |
+
fields = self.get_field_definitions()
|
| 460 |
+
return [
|
| 461 |
+
field_name for field_name, field_config in fields.items()
|
| 462 |
+
if field_config.get("category") == category
|
| 463 |
+
]
|
| 464 |
+
|
| 465 |
+
def get_fields_by_tier(self, tier: str) -> List[str]:
|
| 466 |
+
"""Get all field names in a specific tier"""
|
| 467 |
+
fields = self.get_field_definitions()
|
| 468 |
+
return [
|
| 469 |
+
field_name for field_name, field_config in fields.items()
|
| 470 |
+
if field_config.get("tier") == tier
|
| 471 |
+
]
|
| 472 |
+
|
| 473 |
+
def validate_registry_integrity(self) -> Dict[str, Any]:
|
| 474 |
+
"""Validate the integrity of the loaded registry"""
|
| 475 |
+
validation_results = {
|
| 476 |
+
"valid": True,
|
| 477 |
+
"errors": [],
|
| 478 |
+
"warnings": [],
|
| 479 |
+
"field_count": 0,
|
| 480 |
+
"category_distribution": {},
|
| 481 |
+
"tier_distribution": {}
|
| 482 |
+
}
|
| 483 |
+
|
| 484 |
+
try:
|
| 485 |
+
fields = self.get_field_definitions()
|
| 486 |
+
validation_results["field_count"] = len(fields)
|
| 487 |
+
|
| 488 |
+
# Check category and tier distribution
|
| 489 |
+
categories = {}
|
| 490 |
+
tiers = {}
|
| 491 |
+
|
| 492 |
+
for field_name, field_config in fields.items():
|
| 493 |
+
# Check required field properties
|
| 494 |
+
required_props = ["tier", "weight", "category", "jsonpath"]
|
| 495 |
+
missing_props = [prop for prop in required_props if prop not in field_config]
|
| 496 |
+
|
| 497 |
+
if missing_props:
|
| 498 |
+
validation_results["errors"].append(
|
| 499 |
+
f"Field '{field_name}' missing properties: {missing_props}"
|
| 500 |
+
)
|
| 501 |
+
validation_results["valid"] = False
|
| 502 |
+
|
| 503 |
+
# Count categories and tiers
|
| 504 |
+
category = field_config.get("category", "unknown")
|
| 505 |
+
tier = field_config.get("tier", "unknown")
|
| 506 |
+
|
| 507 |
+
categories[category] = categories.get(category, 0) + 1
|
| 508 |
+
tiers[tier] = tiers.get(tier, 0) + 1
|
| 509 |
+
|
| 510 |
+
validation_results["category_distribution"] = categories
|
| 511 |
+
validation_results["tier_distribution"] = tiers
|
| 512 |
+
|
| 513 |
+
# Check scoring configuration
|
| 514 |
+
scoring_config = self.get_scoring_config()
|
| 515 |
+
if not scoring_config.get("tier_weights"):
|
| 516 |
+
validation_results["warnings"].append("Missing tier_weights in scoring_config")
|
| 517 |
+
|
| 518 |
+
if not scoring_config.get("category_weights"):
|
| 519 |
+
validation_results["warnings"].append("Missing category_weights in scoring_config")
|
| 520 |
+
|
| 521 |
+
except Exception as e:
|
| 522 |
+
validation_results["valid"] = False
|
| 523 |
+
validation_results["errors"].append(f"Registry validation error: {e}")
|
| 524 |
+
|
| 525 |
+
return validation_results
|
| 526 |
+
|
| 527 |
+
# Global Instance — module-level singleton, created lazily on first access.
_registry_manager = None

def get_field_registry_manager() -> FieldRegistryManager:
    """Get the global field registry manager instance (singleton pattern).

    The first call constructs the manager (loading field_registry.json from
    disk); later calls return the same instance.
    """
    global _registry_manager
    if _registry_manager is None:
        _registry_manager = FieldRegistryManager()
    return _registry_manager
|
src/models/schemas.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field
|
| 5 |
+
|
| 6 |
+
# --- Enums (from enhanced_extractor.py) ---
|
| 7 |
+
class DataSource(str, Enum):
    """Enumeration of data sources for provenance tracking.

    Subclassing str means members compare equal to (and serialize as)
    their string values.
    """
    HF_API = "huggingface_api"
    MODEL_CARD = "model_card_yaml"
    README_TEXT = "readme_text"
    CONFIG_FILE = "config_file"
    REPOSITORY_FILES = "repository_files"
    EXTERNAL_REFERENCE = "external_reference"
    INTELLIGENT_DEFAULT = "intelligent_default"
    PLACEHOLDER = "placeholder"
    REGISTRY_DRIVEN = "registry_driven"
|
| 18 |
+
|
| 19 |
+
class ConfidenceLevel(str, Enum):
    """Confidence levels for extracted data.

    str-valued so members serialize directly in JSON payloads.
    """
    HIGH = "high"      # Direct API data, official sources
    MEDIUM = "medium"  # Inferred from reliable patterns
    LOW = "low"        # Weak inference or pattern matching
    NONE = "none"      # Placeholder values
|
| 25 |
+
|
| 26 |
+
# --- internal Models ---
|
| 27 |
+
class ExtractionResult(BaseModel):
    """Container for extraction results with full provenance."""
    value: Any                   # the extracted datum itself
    source: DataSource           # where the value came from
    confidence: ConfidenceLevel  # how trustworthy the extraction is
    extraction_method: str       # name of the technique that produced the value
    # Fix: datetime.utcnow() is deprecated (Python 3.12+) and returns a naive
    # datetime; use an explicit timezone-aware UTC timestamp instead.
    timestamp: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
    fallback_chain: List[str] = Field(default_factory=list)  # earlier sources that were tried

    def __str__(self):
        return f"{self.value} (source: {self.source.value}, confidence: {self.confidence.value})"
|
| 38 |
+
|
| 39 |
+
# --- API Request Models ---
|
| 40 |
+
class GenerateRequest(BaseModel):
    """Request body for single-model AIBOM generation."""
    model_id: str                    # model identifier, e.g. a Hugging Face repo id
    include_inference: bool = True
    use_best_practices: bool = True
    hf_token: Optional[str] = None   # optional HF token — presumably for gated repos; TODO confirm

class BatchRequest(BaseModel):
    """Request body for generating AIBOMs for several models at once."""
    model_ids: List[str]             # one entry per model to process
    include_inference: bool = True
    use_best_practices: bool = True
    hf_token: Optional[str] = None
|
| 51 |
+
|
| 52 |
+
# --- API Response Models ---
|
| 53 |
+
class AIBOMResponse(BaseModel):
    """Response payload carrying a generated AIBOM document."""
    aibom: Dict[str, Any]            # the generated AIBOM document itself
    model_id: str
    generated_at: str
    request_id: str
    download_url: str
    completeness_score: Optional[Dict[str, Any]] = None  # present only when scoring ran

class EnhancementReport(BaseModel):
    """Summary of an AI-assisted enhancement pass over an AIBOM."""
    ai_enhanced: bool = False        # whether an AI enhancement step was applied
    ai_model: Optional[str] = None   # model used for enhancement, when applicable
    original_score: Dict[str, Any]
    final_score: Dict[str, Any]
    improvement: float = 0           # final minus original — presumably score delta; TODO confirm
|
src/models/scoring.py
ADDED
|
@@ -0,0 +1,454 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import logging
|
| 3 |
+
import re
|
| 4 |
+
import os
|
| 5 |
+
import json
|
| 6 |
+
from typing import Dict, List, Optional, Any, Union
|
| 7 |
+
from enum import Enum
|
| 8 |
+
from .registry import get_field_registry_manager
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
# Validation severity levels
|
| 13 |
+
class ValidationSeverity(Enum):
    """Severity levels attached to individual validation issues."""
    ERROR = "error"
    WARNING = "warning"
    INFO = "info"
|
| 17 |
+
|
| 18 |
+
# Initialize registry manager
|
| 19 |
+
try:
    # Registry-driven configuration, loaded once at import time. All scoring
    # and validation below reads from these module-level dicts.
    REGISTRY_MANAGER = get_field_registry_manager()
    FIELD_CLASSIFICATION = REGISTRY_MANAGER.generate_field_classification()
    COMPLETENESS_PROFILES = REGISTRY_MANAGER.generate_completeness_profiles()
    VALIDATION_MESSAGES = REGISTRY_MANAGER.generate_validation_messages()
    SCORING_WEIGHTS = REGISTRY_MANAGER.get_configurable_scoring_weights()
    logger.info(f"✅ Registry-driven configuration loaded: {len(FIELD_CLASSIFICATION)} fields")
except Exception as e:
    logger.error(f"❌ Failed to load registry configuration: {e}")
    # Fallback to empty defaults so the module still imports and scoring
    # degrades gracefully (everything scores 0) instead of crashing.
    FIELD_CLASSIFICATION = {}
    COMPLETENESS_PROFILES = {}
    VALIDATION_MESSAGES = {}
    SCORING_WEIGHTS = {}
|
| 33 |
+
|
| 34 |
+
# Load SPDX licenses
|
| 35 |
+
try:
    # SPDX license IDs come from the bundled schema at src/schemas/spdx.schema.json.
    schema_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "schemas", "spdx.schema.json")
    with open(schema_path, "r", encoding="utf-8") as f:
        _spdx_schema = json.load(f)
    # NOTE(review): assumes the license IDs live under a top-level "enum" key
    # of the schema — confirm against the bundled file.
    SPDX_LICENSES = set(_spdx_schema.get("enum", []))
    logger.info(f"✅ SPDX licenses schema loaded: {len(SPDX_LICENSES)} licenses")
except Exception as e:
    logger.error(f"❌ Failed to load SPDX schema: {e}")
    # Minimal fallback set so validate_spdx() keeps working without the schema.
    SPDX_LICENSES = {"MIT", "Apache-2.0", "GPL-3.0-only", "GPL-2.0-only", "LGPL-3.0-only",
                     "BSD-3-Clause", "BSD-2-Clause", "CC-BY-4.0", "CC-BY-SA-4.0", "CC0-1.0",
                     "Unlicense", "NONE"}
|
| 46 |
+
|
| 47 |
+
# Build JSON Schema Registry
|
| 48 |
+
# Registry of all bundled JSON schemas so jsonschema can resolve $refs locally
# (no network fetches during validation). None when `referencing` is missing
# or loading fails; validate_aibom() handles both cases.
JSON_SCHEMA_REGISTRY = None
try:
    from referencing import Registry, Resource
    registry = Registry()
    schemas_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "schemas")
    if os.path.exists(schemas_dir):
        for filename in os.listdir(schemas_dir):
            if filename.endswith(".json"):
                with open(os.path.join(schemas_dir, filename), "r", encoding="utf-8") as schema_file:
                    schema_data = json.load(schema_file)
                resource = Resource.from_contents(schema_data)
                schema_id = schema_data.get("$id", "")
                # Register each schema under both its $id and its bare
                # filename so either style of $ref resolves.
                if schema_id:
                    registry = registry.with_resource(uri=schema_id, resource=resource)
                registry = registry.with_resource(uri=filename, resource=resource)
    JSON_SCHEMA_REGISTRY = registry
    logger.info("✅ JSON Schema Registry loaded for local ref resolution")
except Exception as e:
    logger.error(f"❌ Failed to build JSON Schema Registry: {e}")
|
| 67 |
+
|
| 68 |
+
def validate_spdx(license_entry):
    """Return True when *license_entry* (one SPDX ID or a list of IDs) is known.

    An empty list vacuously validates, matching ``all()`` semantics.
    """
    entries = license_entry if isinstance(license_entry, list) else [license_entry]
    return all(entry in SPDX_LICENSES for entry in entries)
|
| 72 |
+
|
| 73 |
+
def check_field_in_aibom(aibom: Dict[str, Any], field: str) -> bool:
    """
    Check if a field is present in the AIBOM (legacy/standard layout check).

    Searches, in order: the document root, metadata and its properties, the
    first component (direct keys, properties, model card, model parameters,
    considerations) and finally the first component's external references
    for URL-backed fields (downloadLocation / vcs / website / paper).
    """
    spdx_alias = f"spdx:{field}"

    def _named_in_properties(container: Dict[str, Any]) -> bool:
        # CycloneDX "properties" entries look like {"name": ..., "value": ...};
        # fields may be stored under their plain name or an "spdx:" prefix.
        return any(
            prop.get("name") in {field, spdx_alias}
            for prop in container.get("properties", [])
        )

    # Top-level key wins immediately.
    if field in aibom:
        return True

    metadata = aibom.get("metadata", {})
    if field in metadata or _named_in_properties(metadata):
        return True

    components = aibom.get("components", [])
    if components:
        # Only the first component is inspected (matches the legacy layout).
        component = components[0]
        if field in component or _named_in_properties(component):
            return True

        model_card = component.get("modelCard", {})
        if field in model_card:
            return True
        if "modelParameters" in model_card and field in model_card["modelParameters"]:
            return True

        if "considerations" in model_card:
            considerations = model_card["considerations"]
            # Some abstract fields are satisfied by differently-named
            # consideration sections.
            section_aliases = {
                "technicalLimitations": ["technicalLimitations", "limitations"],
                "safetyRiskAssessment": ["ethicalConsiderations", "safetyRiskAssessment"],
                "energyConsumption": ["environmentalConsiderations", "energyConsumption"],
            }
            if field in section_aliases and any(
                section in considerations and considerations[section]
                for section in section_aliases[field]
            ):
                return True
            if field in considerations:
                return True

    # URL-backed fields: presence means a matching typed reference with a URL.
    if components:
        ext_refs = components[0].get("externalReferences", [])
        accepted_ref_types = {
            "downloadLocation": ("distribution", "website"),
            "vcs": ("vcs",),
            "website": ("website",),
            "paper": ("documentation",),
        }
        if field in accepted_ref_types:
            wanted = accepted_ref_types[field]
            return any(ref.get("type") in wanted and ref.get("url") for ref in ext_refs)

    return False
|
| 142 |
+
|
| 143 |
+
def check_field_with_enhanced_results(aibom: Dict[str, Any], field: str, extraction_results: Optional[Dict[str, Any]] = None) -> bool:
    """
    Enhanced field detection using the field registry and extraction results.

    Resolution order:
      1. registry-configured JSONPath probe of the AIBOM document;
      2. the extractor's per-field result (confidence- or value-based);
      3. the legacy structural scan (``check_field_in_aibom``).

    Any unexpected error falls back to the structural scan.
    """
    try:
        manager = get_field_registry_manager()

        # 1) Registry-driven dynamic detection via the configured JSONPath.
        definitions = manager.get_field_definitions()
        config = definitions.get(field)
        if config:
            path = config.get('jsonpath', '')
            if path:
                present, _ = manager.detect_field_presence(aibom, path)
                if present:
                    return True

        # 2) Extraction-results based detection.
        if extraction_results and field in extraction_results:
            entry = extraction_results[field]
            if hasattr(entry, 'confidence'):
                # Pydantic model / object: judge by extraction confidence,
                # which may be an Enum or a plain string.
                confidence = entry.confidence
                level = confidence.value if hasattr(confidence, 'value') else confidence
                if level == 'none':
                    return False
                return level in ['medium', 'high']
            if hasattr(entry, 'value'):
                return entry.value not in ['NOASSERTION', 'NOT_FOUND', None, '']
            # Plain dict entry: mere presence counts as detected.
            return True

        # 3) Structural fallback.
        return check_field_in_aibom(aibom, field)

    except Exception as e:
        logger.error(f"Error in enhanced field detection for {field}: {e}")
        return check_field_in_aibom(aibom, field)
|
| 185 |
+
|
| 186 |
+
def determine_completeness_profile(aibom: Dict[str, Any], score: float) -> Dict[str, Any]:
    """Return the strongest completeness profile this AIBOM satisfies.

    A profile is satisfied when every one of its required fields is present
    and the score meets its minimum. Profiles are reported in order of
    strength: Advanced > Standard > Basic, else "incomplete".
    """
    satisfied = []
    for profile_name, profile in COMPLETENESS_PROFILES.items():
        has_all_required = all(
            check_field_in_aibom(aibom, required) for required in profile["required_fields"]
        )
        if has_all_required and score >= profile["minimum_score"]:
            satisfied.append(profile_name)

    # Walk from the most to the least demanding profile.
    for key, display_name in (("advanced", "Advanced"), ("standard", "Standard"), ("basic", "Basic")):
        if key in satisfied:
            info = COMPLETENESS_PROFILES.get(key, {})
            return {"name": display_name, "description": info.get("description", ""), "satisfied": True}

    return {"name": "incomplete", "description": "Does not satisfy any completeness profile", "satisfied": False}
|
| 207 |
+
|
| 208 |
+
def generate_field_recommendations(missing_fields: Dict[str, List[str]]) -> List[Dict[str, Any]]:
    """
    Build prioritized recommendations for missing AIBOM fields.

    Critical fields map to priority "high", important to "medium", and at
    most five supplementary fields to "low". For critical/important fields
    the curated registry messages in VALIDATION_MESSAGES are preferred,
    with generic generated text as a fallback.

    Args:
        missing_fields: dict with optional "critical"/"important"/
            "supplementary" lists of field names.

    Returns:
        List of {"priority", "field", "message", "recommendation"} dicts.
    """
    def _recommend(field: str, priority: str, generic_message: str, generic_recommendation: str) -> Dict[str, Any]:
        # Prefer curated registry text; fall back to generated wording.
        if field in VALIDATION_MESSAGES:
            return {
                "priority": priority,
                "field": field,
                "message": VALIDATION_MESSAGES[field]["missing"],
                "recommendation": VALIDATION_MESSAGES[field]["recommendation"],
            }
        return {
            "priority": priority,
            "field": field,
            "message": generic_message,
            "recommendation": generic_recommendation,
        }

    recommendations = []

    for field in missing_fields.get("critical", []):
        recommendations.append(_recommend(
            field, "high",
            f"Missing critical field: {field}",
            f"Add {field} to improve documentation completeness",
        ))

    for field in missing_fields.get("important", []):
        recommendations.append(_recommend(
            field, "medium",
            f"Missing field: {field}",
            f"Consider adding {field}",
        ))

    # Cap supplementary suggestions at five to keep the report short; these
    # always use generic wording (registry messages are not consulted).
    for field in missing_fields.get("supplementary", [])[:5]:
        recommendations.append({
            "priority": "low",
            "field": field,
            "message": f"Missing supplementary field: {field}",
            "recommendation": f"Consider adding {field}",
        })

    return recommendations
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def calculate_completeness_score(aibom: Dict[str, Any], validate: bool = True, extraction_results: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    Calculate completeness score using registry-defined weights and rules.

    Walks every field in FIELD_CLASSIFICATION, detects presence via
    check_field_with_enhanced_results, aggregates per-category percentages
    weighted by SCORING_WEIGHTS, applies penalty factors for missing
    critical/important fields, and returns a report dict (scores, checklist,
    missing fields, profile, recommendations, and optionally a validation
    report).
    """
    # Max points per category (registry-configurable, with hard-coded defaults
    # that sum to 100).
    category_weights = SCORING_WEIGHTS.get("category_weights", {})
    max_scores = {
        "required_fields": category_weights.get("required_fields", 20),
        "metadata": category_weights.get("metadata", 20),
        "component_basic": category_weights.get("component_basic", 20),
        "component_model_card": category_weights.get("component_model_card", 30),
        "external_references": category_weights.get("external_references", 10)
    }

    missing_fields = {"critical": [], "important": [], "supplementary": []}
    fields_by_category = {category: {"total": 0, "present": 0} for category in max_scores.keys()}
    field_checklist = {}
    field_types = {}
    field_reference_urls = {}
    category_fields_list = {category: [] for category in max_scores.keys()}

    # Evaluate every registry field.
    for field, classification in FIELD_CLASSIFICATION.items():
        tier = classification["tier"]
        category = classification["category"]
        is_gguf = classification.get("is_gguf", False)
        jsonpath = classification.get("jsonpath", "")

        # Registry may declare categories beyond the weighted defaults;
        # track them anyway (they contribute 0 points via max_scores.get below).
        if category not in fields_by_category:
            fields_by_category[category] = {"total": 0, "present": 0}
            category_fields_list[category] = []

        is_present = check_field_with_enhanced_results(aibom, field, extraction_results)

        # GGUF-specific fields only count when actually present, so plain
        # (non-GGUF) models are not penalized for them.
        if not is_gguf or is_present:
            fields_by_category[category]["total"] += 1

            # Human-readable path for the report (strip JSONPath prefixes).
            display_path = jsonpath.replace("$.components[0].", "")
            if display_path.startswith("$."): display_path = display_path[2:]

            tier_display = {"critical": "Critical", "important": "Important", "supplementary": "Supplementary"}.get(tier, "Unknown")

            category_fields_list[category].append({
                "name": field,
                "tier": tier_display,
                "path": display_path
            })

            if is_present:
                fields_by_category[category]["present"] += 1
            else:
                # Only non-GGUF fields are reported as "missing".
                if not is_gguf:
                    if tier in missing_fields:
                        missing_fields[tier].append(field)

            # Checklist entry: presence mark + star rating by tier.
            importance_indicator = "★★★" if tier == "critical" else "★★" if tier == "important" else "★"
            field_checklist[field] = f"{'✔' if is_present else '✘'} {importance_indicator}"
            field_types[field] = classification.get("parameter_type", "CDX")
            # Pick the best documentation link for this field, preferring the
            # CycloneDX version that matches the document's specVersion.
            ref_urls = classification.get("reference_urls", {})
            selected_url = ""
            if isinstance(ref_urls, dict):
                spec_version = aibom.get("specVersion", "1.6")
                if spec_version == "1.7" and "cyclonedx_1.7" in ref_urls:
                    selected_url = ref_urls["cyclonedx_1.7"]
                elif "cyclonedx_1.6" in ref_urls:
                    selected_url = ref_urls["cyclonedx_1.6"]
                    # Rewrite the 1.6 docs URL for 1.7 documents when no
                    # dedicated 1.7 link exists.
                    if spec_version == "1.7" and "cyclonedx.org/docs/1.6" in selected_url:
                        selected_url = selected_url.replace("1.6", "1.7")
                elif "genai_aibom_taxonomy" in ref_urls:
                    selected_url = ref_urls["genai_aibom_taxonomy"]
                elif "spdx_3.1" in ref_urls:
                    selected_url = ref_urls["spdx_3.1"]
            elif isinstance(ref_urls, str):
                selected_url = ref_urls

            field_reference_urls[field] = selected_url
    # Calculate category scores: weighted fraction of present fields.
    category_details = {}
    category_scores = {}
    for category, counts in fields_by_category.items():
        weight = max_scores.get(category, 0)
        percentage = 0
        if counts["total"] > 0:
            percentage = (counts["present"] / counts["total"]) * 100
            raw_score = (percentage / 100) * weight
            category_scores[category] = round(raw_score, 1)
        else:
            category_scores[category] = 0.0

        category_details[category] = {
            "present_fields": counts["present"],
            "total_fields": counts["total"],
            "max_points": weight,
            "percentage": round(percentage, 1)
        }


    subtotal_score = sum(category_scores.values())

    # Multiplicative penalties for missing high-tier fields (stack when both
    # conditions hold).
    missing_critical = len(missing_fields["critical"])
    missing_important = len(missing_fields["important"])

    penalty_factor = 1.0
    penalty_reasons = []

    if missing_critical > 3:
        penalty_factor *= 0.8
        penalty_reasons.append("Multiple critical fields missing")
    elif missing_critical >= 2:
        penalty_factor *= 0.9
        penalty_reasons.append("Some critical fields missing")

    if missing_important >= 5:
        penalty_factor *= 0.95
        penalty_reasons.append("Several important fields missing")

    final_score = round(subtotal_score * penalty_factor, 1)
    final_score = max(0.0, min(final_score, 100.0))  # clamp to [0, 100]

    # Prepare the report.
    result = {
        "total_score": final_score,
        "subtotal_score": subtotal_score,
        "section_scores": category_scores,
        "category_details": category_details,
        "max_scores": max_scores,
        "field_checklist": field_checklist,
        "field_types": field_types,
        "reference_urls": field_reference_urls,
        "missing_fields": missing_fields,
        "category_fields_list": category_fields_list,
        "completeness_profile": determine_completeness_profile(aibom, final_score),
        "penalty_applied": penalty_factor < 1.0,
        "penalty_reason": " and ".join(penalty_reasons) if penalty_reasons else None,
        "recommendations": generate_field_recommendations(missing_fields)
    }

    if validate:
        validation_report = validate_aibom(aibom)
        result["validation"] = validation_report

    return result
|
| 402 |
+
|
| 403 |
+
def _validate_ai_requirements(aibom: Dict[str, Any]) -> List[Dict[str, Any]]:
|
| 404 |
+
# ... logic from utils.py ...
|
| 405 |
+
# Implementing minimal version or copying full logic?
|
| 406 |
+
# I'll implement a concise version.
|
| 407 |
+
issues = []
|
| 408 |
+
if "bomFormat" in aibom and aibom["bomFormat"] != "CycloneDX":
|
| 409 |
+
issues.append({"severity": "error", "code": "INVALID_BOM_FORMAT", "message": "Must be CycloneDX", "path": "$.bomFormat"})
|
| 410 |
+
# ... (Add more crucial checks here as needed)
|
| 411 |
+
return issues
|
| 412 |
+
|
| 413 |
+
def validate_aibom(aibom: Dict[str, Any]) -> Dict[str, Any]:
    """
    Validate the AIBOM against the appropriate CycloneDX schema plus
    AI-specific business rules.

    Schema problems are reported as issues rather than raised; a missing
    schema file or a missing ``jsonschema`` package downgrades to a warning.

    Returns:
        Dict with "valid" (no error-severity issues), "issues" (list of
        issue dicts) and "error_count".
    """
    issues = []

    # Import jsonschema OUTSIDE the validation try-block. The original code
    # imported it inside a try whose `except jsonschema.ValidationError`
    # clause referenced the name: on ImportError, evaluating that except
    # clause raised NameError and crashed the caller.
    try:
        import jsonschema
    except ImportError:
        jsonschema = None
        issues.append({"severity": "warning", "message": "jsonschema package not available, skipping strict validation."})

    # 1. Schema Validation (using the bundled local schemas).
    if jsonschema is not None:
        try:
            spec_version = aibom.get("specVersion", "1.6")
            schema_file = f"bom-{spec_version}.schema.json"
            # Relative path from src/models/scoring.py -> src/schemas/
            schema_path = os.path.join(os.path.dirname(__file__), '..', 'schemas', schema_file)

            if os.path.exists(schema_path):
                with open(schema_path, 'r', encoding="utf-8") as f:
                    schema = json.load(f)
                if JSON_SCHEMA_REGISTRY is not None:
                    # Resolve $refs against the local registry (no network).
                    jsonschema.validate(instance=aibom, schema=schema, registry=JSON_SCHEMA_REGISTRY)
                else:
                    jsonschema.validate(instance=aibom, schema=schema)
            else:
                # If schema missing, warn but don't fail hard
                issues.append({"severity": "warning", "message": f"Schema file not found: {schema_file}, skipping strict validation."})

        except jsonschema.ValidationError as e:
            issues.append({"severity": "error", "message": e.message, "path": getattr(e, "json_path", "unknown")})
        except Exception as e:
            issues.append({"severity": "error", "message": f"Validation error: {str(e)}"})

    # 2. Custom Business Logic Checks (AI Requirements)
    issues.extend(_validate_ai_requirements(aibom))

    return {
        "valid": not any(i["severity"] == "error" for i in issues),
        "issues": issues,
        "error_count": sum(1 for i in issues if i["severity"] == "error")
    }
|
src/models/service.py
ADDED
|
@@ -0,0 +1,721 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import json
|
| 3 |
+
import uuid
|
| 4 |
+
import datetime
|
| 5 |
+
import logging
|
| 6 |
+
import re
|
| 7 |
+
from typing import Dict, Optional, Any, List, Union
|
| 8 |
+
from urllib.parse import urlparse
|
| 9 |
+
from packageurl import PackageURL
|
| 10 |
+
|
| 11 |
+
from huggingface_hub import HfApi, ModelCard
|
| 12 |
+
from huggingface_hub.repocard_data import EvalResult
|
| 13 |
+
|
| 14 |
+
from .extractor import EnhancedExtractor
|
| 15 |
+
from .model_file_extractors import ModelFileExtractor, default_extractors
|
| 16 |
+
from .scoring import calculate_completeness_score
|
| 17 |
+
from .registry import get_field_registry_manager
|
| 18 |
+
from .schemas import AIBOMResponse, EnhancementReport
|
| 19 |
+
from ..utils.validation import validate_aibom, get_validation_summary
|
| 20 |
+
from ..utils.license_utils import normalize_license_id, get_license_url, is_valid_spdx_license_id
|
| 21 |
+
from ..config import AIBOM_GEN_VERSION, AIBOM_GEN_NAME
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
class AIBOMService:
|
| 26 |
+
"""
|
| 27 |
+
Service layer for AI SBOM generation.
|
| 28 |
+
Orchestrates metadata extraction, AI SBOM structure creation, and scoring.
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
    def __init__(
        self,
        hf_token: Optional[str] = None,
        inference_model_url: Optional[str] = None,
        use_inference: bool = True,
        use_best_practices: bool = True,
        model_file_extractors: Optional[List[ModelFileExtractor]] = None,
    ):
        """
        Args:
            hf_token: Optional Hugging Face API token (needed for private or
                gated models).
            inference_model_url: Endpoint used by the optional AI-enhancement
                step; enhancement is skipped when this is None.
            use_inference: Default for whether AI enhancement is attempted.
            use_best_practices: Default best-practices behavior for generation.
            model_file_extractors: Custom model-file extractors; the built-in
                set from default_extractors() is used when None.
        """
        self.hf_api = HfApi(token=hf_token)
        self.inference_model_url = inference_model_url
        self.use_inference = use_inference
        self.use_best_practices = use_best_practices
        # Populated by the most recent generate_aibom() call.
        self.enhancement_report = None
        self.extraction_results = {}
        self.model_file_extractors = (
            model_file_extractors if model_file_extractors is not None
            else default_extractors()
        )

        # Initialize registry manager (non-fatal: extraction falls back to the
        # extractor's own manager when this fails).
        try:
            self.registry_manager = get_field_registry_manager()
            logger.info("✅ Registry manager initialized in service")
        except Exception as e:
            logger.warning(f"⚠️ Could not initialize registry manager: {e}")
            self.registry_manager = None
|
| 57 |
+
|
| 58 |
+
def get_extraction_results(self):
|
| 59 |
+
"""Return the enhanced extraction results from the last extraction"""
|
| 60 |
+
return self.extraction_results
|
| 61 |
+
|
| 62 |
+
def get_enhancement_report(self):
|
| 63 |
+
"""Return the enhancement report from the last generation"""
|
| 64 |
+
return self.enhancement_report
|
| 65 |
+
|
| 66 |
+
    def generate_aibom(
        self,
        model_id: str,
        include_inference: bool = False,
        use_best_practices: Optional[bool] = None,
        enable_summarization: bool = False,
        spec_version: str = "1.6",
        metadata_overrides: Optional[Dict[str, str]] = None,
    ) -> Dict[str, Any]:
        """
        Generate an AIBOM for the specified Hugging Face model.

        Pipeline: fetch model info/card -> extract metadata -> build and
        score an initial AIBOM -> (placeholder) AI enhancement -> build,
        validate and score the final AIBOM. The enhancement report for the
        run is stored on self.enhancement_report. On any exception a minimal
        valid AIBOM is returned instead of raising.

        Args:
            model_id: Hugging Face model identifier (normalised internally).
            include_inference: Request AI-based enhancement for this call.
            use_best_practices: Per-call override of the service default.
            enable_summarization: Pass-through flag for the extractor.
            spec_version: CycloneDX spec version of the output document.
            metadata_overrides: Caller-supplied metadata values applied when
                building the final document.

        Returns:
            The generated AIBOM as a dict.
        """
        try:
            model_id = self._normalise_model_id(model_id)
            # NOTE(review): include_inference defaults to False (not None), so
            # the `is not None` guard always takes the left branch — confirm
            # the intended default semantics.
            use_inference = include_inference if include_inference is not None else self.use_inference
            use_best_practices = use_best_practices if use_best_practices is not None else self.use_best_practices

            logger.info(f"Generating AIBOM for {model_id}")

            # Fetch generic info
            model_info = self._fetch_model_info(model_id)
            model_card = self._fetch_model_card(model_id)

            # 1. Extract Metadata (also populates self.extraction_results)
            original_metadata = self._extract_metadata(model_id, model_info, model_card, enable_summarization)

            # 2. Create Initial AIBOM
            original_aibom = self._create_aibom_structure(model_id, original_metadata, spec_version)

            # 3. Initial Score
            original_score = calculate_completeness_score(
                original_aibom,
                validate=True,
                extraction_results=self.extraction_results  # Using results from _extract_metadata
            )

            # 4. AI Enhancement (Placeholder for now as in original)
            final_metadata = original_metadata.copy()
            ai_enhanced = False
            ai_model_name = None

            if use_inference and self.inference_model_url:
                # Placeholder for AI enhancement logic
                pass

            # 5. Create Final AIBOM (with any caller-supplied overrides)
            aibom = self._create_aibom_structure(model_id, final_metadata, spec_version=spec_version, metadata_overrides=metadata_overrides)

            # Validate Schema (failure is recorded, not fatal)
            is_valid, validation_errors = validate_aibom(aibom)
            if not is_valid:
                logger.warning(f"AIBOM schema validation failed with {len(validation_errors)} errors")

            # 6. Final Score
            final_score = calculate_completeness_score(
                aibom,
                validate=True,
                extraction_results=self.extraction_results
            )

            # 7. Store Report (before/after scores plus validation summary)
            self.enhancement_report = {
                "ai_enhanced": ai_enhanced,
                "ai_model": ai_model_name,
                "original_score": original_score,
                "final_score": final_score,
                "improvement": round(final_score["total_score"] - original_score["total_score"], 2) if ai_enhanced else 0,
                "schema_validation": {
                    "valid": is_valid,
                    "error_count": len(validation_errors),
                    "errors": validation_errors[:10] if not is_valid else []
                }
            }

            return aibom

        except Exception as e:
            # Never propagate: callers always receive a (possibly minimal) AIBOM.
            logger.error(f"Error generating AIBOM: {e}", exc_info=True)
            return self._create_minimal_aibom(model_id, spec_version)
|
| 145 |
+
|
| 146 |
+
def _extract_metadata(self, model_id: str, model_info: Dict[str, Any], model_card: Optional[ModelCard], enable_summarization: bool = False) -> Dict[str, Any]:
|
| 147 |
+
"""Wrapper around EnhancedExtractor"""
|
| 148 |
+
extractor = EnhancedExtractor(self.hf_api, model_file_extractors=self.model_file_extractors)
|
| 149 |
+
# Ideally we reuse the registry manager
|
| 150 |
+
if self.registry_manager:
|
| 151 |
+
extractor.registry_manager = self.registry_manager
|
| 152 |
+
extractor.registry_fields = self.registry_manager.get_field_definitions()
|
| 153 |
+
|
| 154 |
+
metadata = extractor.extract_metadata(model_id, model_info, model_card, enable_summarization=enable_summarization)
|
| 155 |
+
self.extraction_results = extractor.extraction_results
|
| 156 |
+
return metadata
|
| 157 |
+
|
| 158 |
+
def _generate_purl(self, model_id: str, version: str, purl_type: str = "huggingface") -> str:
    """Generate a package URL for a model via packageurl-python.

    Args:
        model_id: Model identifier (e.g., "owner/model" or "model").
        version: Version string.
        purl_type: PURL type (default: "huggingface", also supports "generic").

    Returns:
        PURL string in format pkg:type/namespace/name@version.
    """
    owner, _, repo = model_id.partition("/")
    if repo:
        namespace, name = owner, repo
    else:
        # Bare model id: no namespace component.
        namespace, name = None, owner
    return PackageURL(type=purl_type, namespace=namespace, name=name, version=version).to_string()
|
| 174 |
+
|
| 175 |
+
def _get_tool_purl(self) -> str:
    """Return the PURL identifying the OWASP AIBOM Generator tool itself."""
    tool_purl = PackageURL(
        type="generic",
        namespace="owasp-genai",
        name=AIBOM_GEN_NAME,
        version=AIBOM_GEN_VERSION,
    )
    return tool_purl.to_string()
|
| 179 |
+
|
| 180 |
+
def _get_tool_metadata(self) -> Dict[str, Any]:
    """Build the standardized CycloneDX ``metadata.tools`` entry for this generator."""
    tool_component = {
        "bom-ref": self._get_tool_purl(),
        "type": "application",
        "name": AIBOM_GEN_NAME,
        "version": AIBOM_GEN_VERSION,
        "manufacturer": {"name": "OWASP GenAI Security Project"},
    }
    return {"components": [tool_component]}
|
| 191 |
+
|
| 192 |
+
def _create_minimal_aibom(self, model_id: str, spec_version: str = "1.6") -> Dict[str, Any]:
    """Fallback: build the smallest valid AIBOM structure when full generation errors out."""
    model_purl = self._generate_purl(model_id, "1.0")
    app_purl = self._generate_purl(model_id, "1.0", purl_type="generic")
    short_name = model_id.split("/")[-1]
    now_utc = datetime.datetime.now(datetime.timezone.utc).isoformat(timespec='seconds')

    return {
        "bomFormat": "CycloneDX",
        "specVersion": spec_version,
        "serialNumber": f"urn:uuid:{uuid.uuid4()}",
        "version": 1,
        "metadata": {
            "timestamp": now_utc,
            "tools": self._get_tool_metadata(),
            "component": {
                "bom-ref": app_purl,
                "type": "application",
                "name": short_name,
                "version": "1.0",
            },
        },
        "components": [
            {
                "bom-ref": model_purl,
                "type": "machine-learning-model",
                "name": short_name,
                "version": "1.0",
                "purl": model_purl,
            }
        ],
    }
|
| 220 |
+
|
| 221 |
+
def _fetch_with_backoff(self, fetch_func, *args, max_retries=3, initial_backoff=1.0, **kwargs):
|
| 222 |
+
import time
|
| 223 |
+
for attempt in range(max_retries):
|
| 224 |
+
try:
|
| 225 |
+
return fetch_func(*args, **kwargs)
|
| 226 |
+
except Exception as e:
|
| 227 |
+
# e.g., huggingface_hub.utils.HfHubHTTPError
|
| 228 |
+
error_msg = str(e)
|
| 229 |
+
if "401" in error_msg or "404" in error_msg: # Auth or not found don't retry
|
| 230 |
+
raise e
|
| 231 |
+
if attempt == max_retries - 1:
|
| 232 |
+
logger.warning(f"Final attempt failed for API call: {e}")
|
| 233 |
+
raise e
|
| 234 |
+
|
| 235 |
+
sleep_time = initial_backoff * (2 ** attempt)
|
| 236 |
+
logger.warning(f"API call failed: {e}. Retrying in {sleep_time} seconds...")
|
| 237 |
+
time.sleep(sleep_time)
|
| 238 |
+
|
| 239 |
+
def _fetch_model_info(self, model_id: str) -> Dict[str, Any]:
    """Fetch Hugging Face model info with retries; best-effort, {} on failure."""
    try:
        return self._fetch_with_backoff(self.hf_api.model_info, model_id)
    except Exception as exc:
        logger.warning(f"Error fetching model info for {model_id}: {exc}")
        return {}
|
| 245 |
+
|
| 246 |
+
def _fetch_model_card(self, model_id: str) -> Optional[ModelCard]:
    """Fetch the model card (README) with retries; best-effort, None on failure."""
    try:
        return self._fetch_with_backoff(ModelCard.load, model_id)
    except Exception as exc:
        logger.warning(f"Error fetching model card for {model_id}: {exc}")
        return None
|
| 252 |
+
|
| 253 |
+
@staticmethod
|
| 254 |
+
def _normalise_model_id(raw_id: str) -> str:
|
| 255 |
+
if raw_id.startswith(("http://", "https://")):
|
| 256 |
+
path = urlparse(raw_id).path.lstrip("/")
|
| 257 |
+
parts = path.split("/")
|
| 258 |
+
if len(parts) >= 2:
|
| 259 |
+
return "/".join(parts[:2])
|
| 260 |
+
return path
|
| 261 |
+
return raw_id
|
| 262 |
+
|
| 263 |
+
def _create_aibom_structure(self, model_id: str, metadata: Dict[str, Any], spec_version: str = "1.6",
                            metadata_overrides: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
    """Assemble the top-level CycloneDX document from extracted metadata."""
    commit_sha = metadata.get("commit")
    # The short commit hash doubles as the component version when known.
    component_version = commit_sha[:8] if commit_sha else "1.0"

    model_purl = self._generate_purl(model_id, component_version)
    app_purl = self._generate_purl(model_id, component_version, purl_type="generic")

    return {
        "bomFormat": "CycloneDX",
        "specVersion": spec_version,
        "serialNumber": f"urn:uuid:{uuid.uuid4()}",
        "version": 1,
        "metadata": self._create_metadata_section(model_id, metadata, overrides=metadata_overrides),
        "components": [self._create_component_section(model_id, metadata)],
        # The generating "application" component depends on the ML model component.
        "dependencies": [
            {
                "ref": app_purl,
                "dependsOn": [model_purl],
            }
        ],
    }
|
| 286 |
+
|
| 287 |
+
def _create_metadata_section(self, model_id: str, metadata: Dict[str, Any], overrides: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
    """Build the CycloneDX ``metadata`` section (timestamp, tools, root component).

    ``overrides`` may supply "name", "version" and "manufacturer" for the root
    application component; generated defaults (job timestamp name, epoch
    version, OWASP manufacturer) are used otherwise.
    """
    timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat(timespec='seconds')

    # Apply overrides or fall back to defaults.
    overrides = overrides or {}
    comp_name = overrides.get("name") or datetime.datetime.now().strftime("job-%Y-%m-%d-%H:%M:%S")
    comp_version = overrides.get("version") or str(int(datetime.datetime.now().timestamp()))
    comp_mfr = overrides.get("manufacturer") or "OWASP AIBOM Generator"

    # Minimal sanitation for PURL components: spaces are not allowed in a PURL.
    purl = PackageURL(
        type="generic",
        namespace=comp_mfr.replace(" ", "-"),
        name=comp_name.replace(" ", "-"),
        version=comp_version,
    ).to_string()

    root_component = {
        "bom-ref": purl,
        "type": "application",
        "name": comp_name,
        "description": f"Generating SBOM for {model_id}",
        "version": comp_version,
        "purl": purl,
        "manufacturer": {"name": comp_mfr},
        "supplier": {"name": comp_mfr},
    }
    if metadata.get("author"):
        root_component["authors"] = [{"name": metadata["author"]}]

    return {
        "timestamp": timestamp,
        "tools": self._get_tool_metadata(),
        "component": root_component,
    }
|
| 330 |
+
|
| 331 |
+
def _create_component_section(self, model_id: str, metadata: Dict[str, Any]) -> Dict[str, Any]:
    """Build the machine-learning-model component entry of the AIBOM.

    Delegates licenses, supplier/manufacturer, technical properties,
    external references and the model card to the ``_process_*`` /
    ``_create_model_card_section`` helpers, then re-emits the dict in a
    fixed, human-friendly key order.
    """
    parts = model_id.split("/")
    group = parts[0] if len(parts) > 1 else ""
    name = parts[1] if len(parts) > 1 else parts[0]
    full_commit = metadata.get("commit")
    # Short commit hash doubles as the component version when available.
    version = full_commit[:8] if full_commit else "1.0"
    purl = self._generate_purl(model_id, version)

    component = {
        "bom-ref": purl,
        "type": "machine-learning-model",
        "group": group,
        "name": name,
        "version": version,
        "purl": purl,
        "description": metadata.get("description", f"AI model {model_id}")
    }

    # 1. Licenses
    licenses = self._process_licenses(metadata)
    if licenses:
        component["licenses"] = licenses

    # 2. Authors, Manufacturer, Supplier
    # Note: logic inferred from group and metadata
    authors, manufacturer, supplier = self._process_authors_and_suppliers(metadata, group)
    if authors:
        component["authors"] = authors
    if manufacturer:
        component["manufacturer"] = manufacturer
    if supplier:
        component["supplier"] = supplier

    # 3. Technical Properties
    tech_props = self._process_technical_properties(metadata)
    if tech_props:
        component["properties"] = tech_props

    # 4. External References
    external_refs = self._process_external_references(model_id, metadata)
    if external_refs:
        component["externalReferences"] = external_refs

    # 5. Model Card (always present, even if empty)
    component["modelCard"] = self._create_model_card_section(metadata)

    # Defined order for better readability: bom-ref, type, group, name, version, purl, description, modelCard, manufacturer, supplier, authors
    # We also need to preserve: licenses, properties, externalReferences (placing them logically)
    ordered_keys = [
        "bom-ref", "type", "group", "name", "version", "purl",
        "description", "licenses", "modelCard",
        "manufacturer", "supplier", "authors",
        "properties", "externalReferences"
    ]

    # Rebuild the dict in the declared order (dicts preserve insertion order).
    ordered_component = {}
    for key in ordered_keys:
        if key in component:
            ordered_component[key] = component[key]

    # Ensure we didn't miss anything (though we shouldn't have extra keys usually)
    for k, v in component.items():
        if k not in ordered_component:
            ordered_component[k] = v

    return ordered_component
|
| 397 |
+
|
| 398 |
+
def _process_licenses(self, metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Normalize extracted license info into CycloneDX ``licenses`` entries.

    Returns an empty list when no usable license is present, including the
    non-informative values "other", "unknown" and NOASSERTION.
    """
    raw_license = metadata.get("licenses") or metadata.get("license")

    # No license provided -> no license entry in the SBOM.
    if not raw_license:
        return []

    # A list of licenses: keep only the first entry.
    if isinstance(raw_license, list):
        if not raw_license:
            return []
        raw_license = raw_license[0]

    if not (isinstance(raw_license, str) and raw_license.strip()):
        return []

    norm_license = normalize_license_id(raw_license)

    # NOASSERTION / 'other' carry no information; omit them explicitly.
    if norm_license == "NOASSERTION" or (norm_license and norm_license.lower() == "other"):
        return []

    if norm_license:
        if is_valid_spdx_license_id(norm_license):
            # Valid SPDX identifier: reference it by id.
            return [{"license": {"id": norm_license}}]
        # Non-SPDX license: fall back to a named license, attaching a known
        # URL when one exists (e.g. for the Nvidia license).
        entry = {"name": norm_license}
        known_url = get_license_url(norm_license, fallback=False)
        if known_url:
            entry["url"] = known_url
        return [{"license": entry}]

    # Normalization failed: use the raw name unless it is a generic value.
    if raw_license.lower() in ("other", "unknown", "noassertion"):
        return []
    return [{"license": {"name": raw_license}}]
|
| 440 |
+
|
| 441 |
+
def _process_authors_and_suppliers(self, metadata: Dict[str, Any], group: str) -> tuple:
|
| 442 |
+
"""
|
| 443 |
+
Process authors, manufacturer, and supplier information.
|
| 444 |
+
Returns: (authors, manufacturer, supplier)
|
| 445 |
+
"""
|
| 446 |
+
authors = []
|
| 447 |
+
raw_author = metadata.get("author", group)
|
| 448 |
+
if raw_author and raw_author != "unknown":
|
| 449 |
+
if isinstance(raw_author, str):
|
| 450 |
+
authors.append({"name": raw_author})
|
| 451 |
+
elif isinstance(raw_author, list):
|
| 452 |
+
for a in raw_author:
|
| 453 |
+
authors.append({"name": a})
|
| 454 |
+
|
| 455 |
+
manufacturer = None
|
| 456 |
+
supplier = None
|
| 457 |
+
|
| 458 |
+
# Manufacturer and Supplier
|
| 459 |
+
# Use the group (org name) as the manufacturer and supplier if available
|
| 460 |
+
# If 'suppliedBy' extracted from README, overwrite supplier
|
| 461 |
+
supplier_entity = None
|
| 462 |
+
if group:
|
| 463 |
+
supplier_entity = {
|
| 464 |
+
"name": group,
|
| 465 |
+
"url": [f"https://huggingface.co/{group}"]
|
| 466 |
+
}
|
| 467 |
+
|
| 468 |
+
if "suppliedBy" in metadata and metadata["suppliedBy"]:
|
| 469 |
+
# If we have explicit suppliedBy, use it for supplier
|
| 470 |
+
supplier_entity = {"name": metadata["suppliedBy"]}
|
| 471 |
+
|
| 472 |
+
if supplier_entity:
|
| 473 |
+
supplier = supplier_entity
|
| 474 |
+
# Manufacturer often implies the creator/fine-tuner.
|
| 475 |
+
# If we have a group, we assume they manufactured it too unless specified.
|
| 476 |
+
if group:
|
| 477 |
+
manufacturer = {
|
| 478 |
+
"name": group,
|
| 479 |
+
"url": [f"https://huggingface.co/{group}"]
|
| 480 |
+
}
|
| 481 |
+
|
| 482 |
+
return authors, manufacturer, supplier
|
| 483 |
+
|
| 484 |
+
def _process_technical_properties(self, metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
|
| 485 |
+
tech_props = []
|
| 486 |
+
for field in ["model_type", "architectures", "library_name"]:
|
| 487 |
+
if field in metadata:
|
| 488 |
+
val = metadata[field]
|
| 489 |
+
if isinstance(val, list):
|
| 490 |
+
val = ", ".join(val)
|
| 491 |
+
tech_props.append({"name": field, "value": str(val)})
|
| 492 |
+
return tech_props
|
| 493 |
+
|
| 494 |
+
def _process_external_references(self, model_id: str, metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
|
| 495 |
+
"""Process external references including Hugging Face URLs and papers."""
|
| 496 |
+
# Start with generic website reference
|
| 497 |
+
generic_ref = {"type": "website", "url": f"https://huggingface.co/{model_id}"}
|
| 498 |
+
external_refs = [generic_ref]
|
| 499 |
+
|
| 500 |
+
if "external_references" in metadata and isinstance(metadata["external_references"], list):
|
| 501 |
+
for ref in metadata["external_references"]:
|
| 502 |
+
if isinstance(ref, dict) and "url" in ref:
|
| 503 |
+
rtype = ref.get("type", "website")
|
| 504 |
+
# Check if URL already exists in our list
|
| 505 |
+
existing_idx = next((i for i, r in enumerate(external_refs) if r["url"] == ref["url"]), -1)
|
| 506 |
+
|
| 507 |
+
new_ref = {"type": rtype, "url": ref["url"], "comment": ref.get("comment")}
|
| 508 |
+
|
| 509 |
+
if existing_idx != -1:
|
| 510 |
+
# If existing is generic (no comment) and new one has comment, replace it
|
| 511 |
+
if not external_refs[existing_idx].get("comment") and new_ref.get("comment"):
|
| 512 |
+
external_refs[existing_idx] = new_ref
|
| 513 |
+
else:
|
| 514 |
+
external_refs.append(new_ref)
|
| 515 |
+
|
| 516 |
+
# Paper (ArXiv or other documentation)
|
| 517 |
+
if "paper" in metadata and metadata["paper"]:
|
| 518 |
+
papers = metadata["paper"]
|
| 519 |
+
if isinstance(papers, str):
|
| 520 |
+
papers = [papers]
|
| 521 |
+
|
| 522 |
+
for p in papers:
|
| 523 |
+
# Check for duplicates
|
| 524 |
+
if not any(r["url"] == p for r in external_refs):
|
| 525 |
+
# Try to infer if it's arxiv for comment
|
| 526 |
+
comment = "Research Paper"
|
| 527 |
+
if "arxiv.org" in p:
|
| 528 |
+
comment = "ArXiv Paper"
|
| 529 |
+
|
| 530 |
+
external_refs.append({
|
| 531 |
+
"type": "documentation",
|
| 532 |
+
"url": p,
|
| 533 |
+
"comment": comment
|
| 534 |
+
})
|
| 535 |
+
|
| 536 |
+
return external_refs
|
| 537 |
+
|
| 538 |
+
def _create_model_card_section(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
    """Build the CycloneDX ``modelCard`` section from the flat metadata dict.

    Maps task/architecture/dataset fields into ``modelParameters``, eval
    results into ``quantitativeAnalysis``, usage/limitation fields into
    ``considerations``, and everything remaining (GGUF, taxonomy fields,
    leftovers) into generic ``properties``.
    """
    section = {}

    # 1. Model Parameters
    params = {}
    # primaryPurpose -> task (pipeline_tag is the fallback)
    if "primaryPurpose" in metadata:
        params["task"] = metadata["primaryPurpose"]
    elif "pipeline_tag" in metadata:
        params["task"] = metadata["pipeline_tag"]

    # typeOfModel -> modelArchitecture; otherwise synthesize from the name
    if "typeOfModel" in metadata:
        params["modelArchitecture"] = metadata["typeOfModel"]
    else:
        params["modelArchitecture"] = f"{metadata.get('name', 'Unknown')}Model"

    # Datasets: accept a list of names, a list of dicts, or a bare name
    if "datasets" in metadata:
        ds_val = metadata["datasets"]
        datasets = []
        if isinstance(ds_val, list):
            for d in ds_val:
                if isinstance(d, str):
                    # CycloneDX 1.7 compliant componentData
                    datasets.append({
                        "type": "dataset",
                        "name": d,
                        "contents": {
                            "url": f"https://huggingface.co/datasets/{d}"
                        }
                    })
                elif isinstance(d, dict) and "name" in d:
                    datasets.append({"type": "dataset", "name": d.get("name"), "url": d.get("url")})
        elif isinstance(ds_val, str):
            datasets.append({
                "type": "dataset",
                "name": ds_val,
                "contents": {
                    "url": f"https://huggingface.co/datasets/{ds_val}"
                }
            })

        if datasets:
            params["datasets"] = datasets

    # Inputs / Outputs (Inferred from task)
    task = params.get("task")
    if task:
        inputs, outputs = self._infer_io_formats(task)
        if inputs:
            params["inputs"] = [{"format": i} for i in inputs]
        if outputs:
            params["outputs"] = [{"format": o} for o in outputs]

    if params:
        section["modelParameters"] = params

    # 2. Quantitative Analysis (eval metrics)
    if "eval_results" in metadata:
        metrics = []
        raw_results = metadata["eval_results"]
        if isinstance(raw_results, list):
            for res in raw_results:
                # Handle object or dict (EvalResult objects expose attributes)
                if hasattr(res, "metric_type") and hasattr(res, "metric_value"):
                    metrics.append({"type": str(res.metric_type), "value": str(res.metric_value)})
                elif isinstance(res, dict) and "metric_type" in res and "metric_value" in res:
                    metrics.append({"type": str(res["metric_type"]), "value": str(res["metric_value"])})

        if metrics:
            section["quantitativeAnalysis"] = {"performanceMetrics": metrics}

    # 3. Considerations
    considerations = {}
    # intendedUse -> useCases
    if "intendedUse" in metadata:
        considerations["useCases"] = [metadata["intendedUse"]]
    # technicalLimitations
    if "technicalLimitations" in metadata:
        considerations["technicalLimitations"] = [metadata["technicalLimitations"]]
    # ethicalConsiderations
    if "ethicalConsiderations" in metadata:
        considerations["ethicalConsiderations"] = [{"name": "Ethical Considerations", "description": metadata["ethicalConsiderations"]}]

    if considerations:
        section["considerations"] = considerations

    # 4. Properties (GGUF & Taxonomy + Leftovers)
    props = []

    # snake_case metadata keys -> camelCase modelcard property names
    taxonomy_modelcard_mapping = {
        "hyperparameter": "hyperparameter",
        "vocab_size": "vocabSize",
        "tokenizer_class": "tokenizerClass",
        "context_length": "contextLength",
        "embedding_length": "embeddingLength",
        "block_count": "blockCount",
        "attention_head_count": "attentionHeadCount",
        "attention_head_count_kv": "attentionHeadCountKV",
        "feed_forward_length": "feedForwardLength",
        "rope_dimension_count": "ropeDimensionCount",
        "quantization_version": "quantizationVersion",
        "quantization_file_type": "quantizationFileType",
        "modelExplainability": "modelCardExplainability"
    }

    taxonomy_mapped_keys = list(taxonomy_modelcard_mapping.keys())

    for p_key, p_name in taxonomy_modelcard_mapping.items():
        if p_key in metadata:
            val = metadata[p_key]
            if p_key == "hyperparameter" and isinstance(val, dict):
                # Hyperparameter dicts are serialized whole as a JSON string
                props.append({"name": f"genai:aibom:modelcard:{p_name}", "value": json.dumps(val)})
            elif val is not None:
                props.append({"name": f"genai:aibom:modelcard:{p_name}", "value": str(val)})

    # Quantization dict handling (GGUF metadata)
    if "quantization" in metadata and isinstance(metadata["quantization"], dict):
        q_dict = metadata["quantization"]
        if "version" in q_dict:
            props.append({"name": "genai:aibom:modelcard:quantizationVersion", "value": str(q_dict["version"])})
        if "file_type" in q_dict:
            props.append({"name": "genai:aibom:modelcard:quantizationFileType", "value": str(q_dict["file_type"])})
        taxonomy_mapped_keys.append("quantization")

    # Basic Fields we've already mapped to structured homes
    mapped_fields = [
        "primaryPurpose", "typeOfModel", "suppliedBy", "intendedUse",
        "technicalLimitations", "ethicalConsiderations", "datasets", "eval_results",
        "pipeline_tag", "name", "author", "license", "description",
        "commit", "bomFormat", "specVersion", "version", "licenses",
        "external_references", "tags", "library_name", "paper", "downloadLocation",
        "gguf_filename", "gguf_license", "model_type", "architectures"
    ] + taxonomy_mapped_keys

    # Anything not yet mapped becomes a generic property (scalars and
    # lists of scalars only; nested structures are dropped)
    for k, v in metadata.items():
        if k not in mapped_fields and v is not None:
            # Basic types only for properties
            if isinstance(v, (str, int, float, bool)):
                props.append({"name": k, "value": str(v)})
            elif isinstance(v, list) and all(isinstance(x, (str, int, float, bool)) for x in v):
                props.append({"name": k, "value": ", ".join(map(str, v))})

    if props:
        section["properties"] = props

    return section
|
| 686 |
+
|
| 687 |
+
def _infer_io_formats(self, task: str) -> tuple:
|
| 688 |
+
"""
|
| 689 |
+
Infer input and output formats based on the pipeline task.
|
| 690 |
+
Returns (inputs: list, outputs: list)
|
| 691 |
+
"""
|
| 692 |
+
task = task.lower().strip()
|
| 693 |
+
|
| 694 |
+
# Text to Text
|
| 695 |
+
if task in ["text-generation", "text2text-generation", "summarization", "translation",
|
| 696 |
+
"conversational", "question-answering", "text-classification", "token-classification"]:
|
| 697 |
+
return (["string"], ["string"])
|
| 698 |
+
|
| 699 |
+
# Image to Text/Label
|
| 700 |
+
if task in ["image-classification", "object-detection", "image-segmentation"]:
|
| 701 |
+
return (["image"], ["string", "json"])
|
| 702 |
+
|
| 703 |
+
# Text to Image
|
| 704 |
+
if task in ["text-to-image"]:
|
| 705 |
+
return (["string"], ["image"])
|
| 706 |
+
|
| 707 |
+
# Audio
|
| 708 |
+
if task in ["automatic-speech-recognition", "audio-classification"]:
|
| 709 |
+
return (["audio"], ["string"])
|
| 710 |
+
if task in ["text-to-speech"]:
|
| 711 |
+
return (["string"], ["audio"])
|
| 712 |
+
|
| 713 |
+
# Multimodal
|
| 714 |
+
if task in ["visual-question-answering"]:
|
| 715 |
+
return (["image", "string"], ["string"])
|
| 716 |
+
|
| 717 |
+
# Tabular
|
| 718 |
+
if task in ["tabular-classification", "tabular-regression"]:
|
| 719 |
+
return (["csv", "json"], ["string", "number"])
|
| 720 |
+
|
| 721 |
+
return ([], [])
|
src/schemas/bom-1.6.schema.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
src/schemas/bom-1.7.schema.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
src/schemas/spdx.schema.json
ADDED
|
@@ -0,0 +1,786 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "http://json-schema.org/draft-07/schema#",
|
| 3 |
+
"$id": "http://cyclonedx.org/schema/spdx.schema.json",
|
| 4 |
+
"$comment": "v1.0-3.27.0",
|
| 5 |
+
"type": "string",
|
| 6 |
+
"enum": [
|
| 7 |
+
"0BSD",
|
| 8 |
+
"3D-Slicer-1.0",
|
| 9 |
+
"AAL",
|
| 10 |
+
"Abstyles",
|
| 11 |
+
"AdaCore-doc",
|
| 12 |
+
"Adobe-2006",
|
| 13 |
+
"Adobe-Display-PostScript",
|
| 14 |
+
"Adobe-Glyph",
|
| 15 |
+
"Adobe-Utopia",
|
| 16 |
+
"ADSL",
|
| 17 |
+
"AFL-1.1",
|
| 18 |
+
"AFL-1.2",
|
| 19 |
+
"AFL-2.0",
|
| 20 |
+
"AFL-2.1",
|
| 21 |
+
"AFL-3.0",
|
| 22 |
+
"Afmparse",
|
| 23 |
+
"AGPL-1.0",
|
| 24 |
+
"AGPL-1.0-only",
|
| 25 |
+
"AGPL-1.0-or-later",
|
| 26 |
+
"AGPL-3.0",
|
| 27 |
+
"AGPL-3.0-only",
|
| 28 |
+
"AGPL-3.0-or-later",
|
| 29 |
+
"Aladdin",
|
| 30 |
+
"AMD-newlib",
|
| 31 |
+
"AMDPLPA",
|
| 32 |
+
"AML",
|
| 33 |
+
"AML-glslang",
|
| 34 |
+
"AMPAS",
|
| 35 |
+
"ANTLR-PD",
|
| 36 |
+
"ANTLR-PD-fallback",
|
| 37 |
+
"any-OSI",
|
| 38 |
+
"any-OSI-perl-modules",
|
| 39 |
+
"Apache-1.0",
|
| 40 |
+
"Apache-1.1",
|
| 41 |
+
"Apache-2.0",
|
| 42 |
+
"APAFML",
|
| 43 |
+
"APL-1.0",
|
| 44 |
+
"App-s2p",
|
| 45 |
+
"APSL-1.0",
|
| 46 |
+
"APSL-1.1",
|
| 47 |
+
"APSL-1.2",
|
| 48 |
+
"APSL-2.0",
|
| 49 |
+
"Arphic-1999",
|
| 50 |
+
"Artistic-1.0",
|
| 51 |
+
"Artistic-1.0-cl8",
|
| 52 |
+
"Artistic-1.0-Perl",
|
| 53 |
+
"Artistic-2.0",
|
| 54 |
+
"Artistic-dist",
|
| 55 |
+
"Aspell-RU",
|
| 56 |
+
"ASWF-Digital-Assets-1.0",
|
| 57 |
+
"ASWF-Digital-Assets-1.1",
|
| 58 |
+
"Baekmuk",
|
| 59 |
+
"Bahyph",
|
| 60 |
+
"Barr",
|
| 61 |
+
"bcrypt-Solar-Designer",
|
| 62 |
+
"Beerware",
|
| 63 |
+
"Bitstream-Charter",
|
| 64 |
+
"Bitstream-Vera",
|
| 65 |
+
"BitTorrent-1.0",
|
| 66 |
+
"BitTorrent-1.1",
|
| 67 |
+
"blessing",
|
| 68 |
+
"BlueOak-1.0.0",
|
| 69 |
+
"Boehm-GC",
|
| 70 |
+
"Boehm-GC-without-fee",
|
| 71 |
+
"Borceux",
|
| 72 |
+
"Brian-Gladman-2-Clause",
|
| 73 |
+
"Brian-Gladman-3-Clause",
|
| 74 |
+
"BSD-1-Clause",
|
| 75 |
+
"BSD-2-Clause",
|
| 76 |
+
"BSD-2-Clause-Darwin",
|
| 77 |
+
"BSD-2-Clause-first-lines",
|
| 78 |
+
"BSD-2-Clause-FreeBSD",
|
| 79 |
+
"BSD-2-Clause-NetBSD",
|
| 80 |
+
"BSD-2-Clause-Patent",
|
| 81 |
+
"BSD-2-Clause-pkgconf-disclaimer",
|
| 82 |
+
"BSD-2-Clause-Views",
|
| 83 |
+
"BSD-3-Clause",
|
| 84 |
+
"BSD-3-Clause-acpica",
|
| 85 |
+
"BSD-3-Clause-Attribution",
|
| 86 |
+
"BSD-3-Clause-Clear",
|
| 87 |
+
"BSD-3-Clause-flex",
|
| 88 |
+
"BSD-3-Clause-HP",
|
| 89 |
+
"BSD-3-Clause-LBNL",
|
| 90 |
+
"BSD-3-Clause-Modification",
|
| 91 |
+
"BSD-3-Clause-No-Military-License",
|
| 92 |
+
"BSD-3-Clause-No-Nuclear-License",
|
| 93 |
+
"BSD-3-Clause-No-Nuclear-License-2014",
|
| 94 |
+
"BSD-3-Clause-No-Nuclear-Warranty",
|
| 95 |
+
"BSD-3-Clause-Open-MPI",
|
| 96 |
+
"BSD-3-Clause-Sun",
|
| 97 |
+
"BSD-4-Clause",
|
| 98 |
+
"BSD-4-Clause-Shortened",
|
| 99 |
+
"BSD-4-Clause-UC",
|
| 100 |
+
"BSD-4.3RENO",
|
| 101 |
+
"BSD-4.3TAHOE",
|
| 102 |
+
"BSD-Advertising-Acknowledgement",
|
| 103 |
+
"BSD-Attribution-HPND-disclaimer",
|
| 104 |
+
"BSD-Inferno-Nettverk",
|
| 105 |
+
"BSD-Protection",
|
| 106 |
+
"BSD-Source-beginning-file",
|
| 107 |
+
"BSD-Source-Code",
|
| 108 |
+
"BSD-Systemics",
|
| 109 |
+
"BSD-Systemics-W3Works",
|
| 110 |
+
"BSL-1.0",
|
| 111 |
+
"BUSL-1.1",
|
| 112 |
+
"bzip2-1.0.5",
|
| 113 |
+
"bzip2-1.0.6",
|
| 114 |
+
"C-UDA-1.0",
|
| 115 |
+
"CAL-1.0",
|
| 116 |
+
"CAL-1.0-Combined-Work-Exception",
|
| 117 |
+
"Caldera",
|
| 118 |
+
"Caldera-no-preamble",
|
| 119 |
+
"Catharon",
|
| 120 |
+
"CATOSL-1.1",
|
| 121 |
+
"CC-BY-1.0",
|
| 122 |
+
"CC-BY-2.0",
|
| 123 |
+
"CC-BY-2.5",
|
| 124 |
+
"CC-BY-2.5-AU",
|
| 125 |
+
"CC-BY-3.0",
|
| 126 |
+
"CC-BY-3.0-AT",
|
| 127 |
+
"CC-BY-3.0-AU",
|
| 128 |
+
"CC-BY-3.0-DE",
|
| 129 |
+
"CC-BY-3.0-IGO",
|
| 130 |
+
"CC-BY-3.0-NL",
|
| 131 |
+
"CC-BY-3.0-US",
|
| 132 |
+
"CC-BY-4.0",
|
| 133 |
+
"CC-BY-NC-1.0",
|
| 134 |
+
"CC-BY-NC-2.0",
|
| 135 |
+
"CC-BY-NC-2.5",
|
| 136 |
+
"CC-BY-NC-3.0",
|
| 137 |
+
"CC-BY-NC-3.0-DE",
|
| 138 |
+
"CC-BY-NC-4.0",
|
| 139 |
+
"CC-BY-NC-ND-1.0",
|
| 140 |
+
"CC-BY-NC-ND-2.0",
|
| 141 |
+
"CC-BY-NC-ND-2.5",
|
| 142 |
+
"CC-BY-NC-ND-3.0",
|
| 143 |
+
"CC-BY-NC-ND-3.0-DE",
|
| 144 |
+
"CC-BY-NC-ND-3.0-IGO",
|
| 145 |
+
"CC-BY-NC-ND-4.0",
|
| 146 |
+
"CC-BY-NC-SA-1.0",
|
| 147 |
+
"CC-BY-NC-SA-2.0",
|
| 148 |
+
"CC-BY-NC-SA-2.0-DE",
|
| 149 |
+
"CC-BY-NC-SA-2.0-FR",
|
| 150 |
+
"CC-BY-NC-SA-2.0-UK",
|
| 151 |
+
"CC-BY-NC-SA-2.5",
|
| 152 |
+
"CC-BY-NC-SA-3.0",
|
| 153 |
+
"CC-BY-NC-SA-3.0-DE",
|
| 154 |
+
"CC-BY-NC-SA-3.0-IGO",
|
| 155 |
+
"CC-BY-NC-SA-4.0",
|
| 156 |
+
"CC-BY-ND-1.0",
|
| 157 |
+
"CC-BY-ND-2.0",
|
| 158 |
+
"CC-BY-ND-2.5",
|
| 159 |
+
"CC-BY-ND-3.0",
|
| 160 |
+
"CC-BY-ND-3.0-DE",
|
| 161 |
+
"CC-BY-ND-4.0",
|
| 162 |
+
"CC-BY-SA-1.0",
|
| 163 |
+
"CC-BY-SA-2.0",
|
| 164 |
+
"CC-BY-SA-2.0-UK",
|
| 165 |
+
"CC-BY-SA-2.1-JP",
|
| 166 |
+
"CC-BY-SA-2.5",
|
| 167 |
+
"CC-BY-SA-3.0",
|
| 168 |
+
"CC-BY-SA-3.0-AT",
|
| 169 |
+
"CC-BY-SA-3.0-DE",
|
| 170 |
+
"CC-BY-SA-3.0-IGO",
|
| 171 |
+
"CC-BY-SA-4.0",
|
| 172 |
+
"CC-PDDC",
|
| 173 |
+
"CC-PDM-1.0",
|
| 174 |
+
"CC-SA-1.0",
|
| 175 |
+
"CC0-1.0",
|
| 176 |
+
"CDDL-1.0",
|
| 177 |
+
"CDDL-1.1",
|
| 178 |
+
"CDL-1.0",
|
| 179 |
+
"CDLA-Permissive-1.0",
|
| 180 |
+
"CDLA-Permissive-2.0",
|
| 181 |
+
"CDLA-Sharing-1.0",
|
| 182 |
+
"CECILL-1.0",
|
| 183 |
+
"CECILL-1.1",
|
| 184 |
+
"CECILL-2.0",
|
| 185 |
+
"CECILL-2.1",
|
| 186 |
+
"CECILL-B",
|
| 187 |
+
"CECILL-C",
|
| 188 |
+
"CERN-OHL-1.1",
|
| 189 |
+
"CERN-OHL-1.2",
|
| 190 |
+
"CERN-OHL-P-2.0",
|
| 191 |
+
"CERN-OHL-S-2.0",
|
| 192 |
+
"CERN-OHL-W-2.0",
|
| 193 |
+
"CFITSIO",
|
| 194 |
+
"check-cvs",
|
| 195 |
+
"checkmk",
|
| 196 |
+
"ClArtistic",
|
| 197 |
+
"Clips",
|
| 198 |
+
"CMU-Mach",
|
| 199 |
+
"CMU-Mach-nodoc",
|
| 200 |
+
"CNRI-Jython",
|
| 201 |
+
"CNRI-Python",
|
| 202 |
+
"CNRI-Python-GPL-Compatible",
|
| 203 |
+
"COIL-1.0",
|
| 204 |
+
"Community-Spec-1.0",
|
| 205 |
+
"Condor-1.1",
|
| 206 |
+
"copyleft-next-0.3.0",
|
| 207 |
+
"copyleft-next-0.3.1",
|
| 208 |
+
"Cornell-Lossless-JPEG",
|
| 209 |
+
"CPAL-1.0",
|
| 210 |
+
"CPL-1.0",
|
| 211 |
+
"CPOL-1.02",
|
| 212 |
+
"Cronyx",
|
| 213 |
+
"Crossword",
|
| 214 |
+
"CryptoSwift",
|
| 215 |
+
"CrystalStacker",
|
| 216 |
+
"CUA-OPL-1.0",
|
| 217 |
+
"Cube",
|
| 218 |
+
"curl",
|
| 219 |
+
"cve-tou",
|
| 220 |
+
"D-FSL-1.0",
|
| 221 |
+
"DEC-3-Clause",
|
| 222 |
+
"diffmark",
|
| 223 |
+
"DL-DE-BY-2.0",
|
| 224 |
+
"DL-DE-ZERO-2.0",
|
| 225 |
+
"DOC",
|
| 226 |
+
"DocBook-DTD",
|
| 227 |
+
"DocBook-Schema",
|
| 228 |
+
"DocBook-Stylesheet",
|
| 229 |
+
"DocBook-XML",
|
| 230 |
+
"Dotseqn",
|
| 231 |
+
"DRL-1.0",
|
| 232 |
+
"DRL-1.1",
|
| 233 |
+
"DSDP",
|
| 234 |
+
"dtoa",
|
| 235 |
+
"dvipdfm",
|
| 236 |
+
"ECL-1.0",
|
| 237 |
+
"ECL-2.0",
|
| 238 |
+
"eCos-2.0",
|
| 239 |
+
"EFL-1.0",
|
| 240 |
+
"EFL-2.0",
|
| 241 |
+
"eGenix",
|
| 242 |
+
"Elastic-2.0",
|
| 243 |
+
"Entessa",
|
| 244 |
+
"EPICS",
|
| 245 |
+
"EPL-1.0",
|
| 246 |
+
"EPL-2.0",
|
| 247 |
+
"ErlPL-1.1",
|
| 248 |
+
"etalab-2.0",
|
| 249 |
+
"EUDatagrid",
|
| 250 |
+
"EUPL-1.0",
|
| 251 |
+
"EUPL-1.1",
|
| 252 |
+
"EUPL-1.2",
|
| 253 |
+
"Eurosym",
|
| 254 |
+
"Fair",
|
| 255 |
+
"FBM",
|
| 256 |
+
"FDK-AAC",
|
| 257 |
+
"Ferguson-Twofish",
|
| 258 |
+
"Frameworx-1.0",
|
| 259 |
+
"FreeBSD-DOC",
|
| 260 |
+
"FreeImage",
|
| 261 |
+
"FSFAP",
|
| 262 |
+
"FSFAP-no-warranty-disclaimer",
|
| 263 |
+
"FSFUL",
|
| 264 |
+
"FSFULLR",
|
| 265 |
+
"FSFULLRSD",
|
| 266 |
+
"FSFULLRWD",
|
| 267 |
+
"FSL-1.1-ALv2",
|
| 268 |
+
"FSL-1.1-MIT",
|
| 269 |
+
"FTL",
|
| 270 |
+
"Furuseth",
|
| 271 |
+
"fwlw",
|
| 272 |
+
"Game-Programming-Gems",
|
| 273 |
+
"GCR-docs",
|
| 274 |
+
"GD",
|
| 275 |
+
"generic-xts",
|
| 276 |
+
"GFDL-1.1",
|
| 277 |
+
"GFDL-1.1-invariants-only",
|
| 278 |
+
"GFDL-1.1-invariants-or-later",
|
| 279 |
+
"GFDL-1.1-no-invariants-only",
|
| 280 |
+
"GFDL-1.1-no-invariants-or-later",
|
| 281 |
+
"GFDL-1.1-only",
|
| 282 |
+
"GFDL-1.1-or-later",
|
| 283 |
+
"GFDL-1.2",
|
| 284 |
+
"GFDL-1.2-invariants-only",
|
| 285 |
+
"GFDL-1.2-invariants-or-later",
|
| 286 |
+
"GFDL-1.2-no-invariants-only",
|
| 287 |
+
"GFDL-1.2-no-invariants-or-later",
|
| 288 |
+
"GFDL-1.2-only",
|
| 289 |
+
"GFDL-1.2-or-later",
|
| 290 |
+
"GFDL-1.3",
|
| 291 |
+
"GFDL-1.3-invariants-only",
|
| 292 |
+
"GFDL-1.3-invariants-or-later",
|
| 293 |
+
"GFDL-1.3-no-invariants-only",
|
| 294 |
+
"GFDL-1.3-no-invariants-or-later",
|
| 295 |
+
"GFDL-1.3-only",
|
| 296 |
+
"GFDL-1.3-or-later",
|
| 297 |
+
"Giftware",
|
| 298 |
+
"GL2PS",
|
| 299 |
+
"Glide",
|
| 300 |
+
"Glulxe",
|
| 301 |
+
"GLWTPL",
|
| 302 |
+
"gnuplot",
|
| 303 |
+
"GPL-1.0",
|
| 304 |
+
"GPL-1.0+",
|
| 305 |
+
"GPL-1.0-only",
|
| 306 |
+
"GPL-1.0-or-later",
|
| 307 |
+
"GPL-2.0",
|
| 308 |
+
"GPL-2.0+",
|
| 309 |
+
"GPL-2.0-only",
|
| 310 |
+
"GPL-2.0-or-later",
|
| 311 |
+
"GPL-2.0-with-autoconf-exception",
|
| 312 |
+
"GPL-2.0-with-bison-exception",
|
| 313 |
+
"GPL-2.0-with-classpath-exception",
|
| 314 |
+
"GPL-2.0-with-font-exception",
|
| 315 |
+
"GPL-2.0-with-GCC-exception",
|
| 316 |
+
"GPL-3.0",
|
| 317 |
+
"GPL-3.0+",
|
| 318 |
+
"GPL-3.0-only",
|
| 319 |
+
"GPL-3.0-or-later",
|
| 320 |
+
"GPL-3.0-with-autoconf-exception",
|
| 321 |
+
"GPL-3.0-with-GCC-exception",
|
| 322 |
+
"Graphics-Gems",
|
| 323 |
+
"gSOAP-1.3b",
|
| 324 |
+
"gtkbook",
|
| 325 |
+
"Gutmann",
|
| 326 |
+
"HaskellReport",
|
| 327 |
+
"HDF5",
|
| 328 |
+
"hdparm",
|
| 329 |
+
"HIDAPI",
|
| 330 |
+
"Hippocratic-2.1",
|
| 331 |
+
"HP-1986",
|
| 332 |
+
"HP-1989",
|
| 333 |
+
"HPND",
|
| 334 |
+
"HPND-DEC",
|
| 335 |
+
"HPND-doc",
|
| 336 |
+
"HPND-doc-sell",
|
| 337 |
+
"HPND-export-US",
|
| 338 |
+
"HPND-export-US-acknowledgement",
|
| 339 |
+
"HPND-export-US-modify",
|
| 340 |
+
"HPND-export2-US",
|
| 341 |
+
"HPND-Fenneberg-Livingston",
|
| 342 |
+
"HPND-INRIA-IMAG",
|
| 343 |
+
"HPND-Intel",
|
| 344 |
+
"HPND-Kevlin-Henney",
|
| 345 |
+
"HPND-Markus-Kuhn",
|
| 346 |
+
"HPND-merchantability-variant",
|
| 347 |
+
"HPND-MIT-disclaimer",
|
| 348 |
+
"HPND-Netrek",
|
| 349 |
+
"HPND-Pbmplus",
|
| 350 |
+
"HPND-sell-MIT-disclaimer-xserver",
|
| 351 |
+
"HPND-sell-regexpr",
|
| 352 |
+
"HPND-sell-variant",
|
| 353 |
+
"HPND-sell-variant-MIT-disclaimer",
|
| 354 |
+
"HPND-sell-variant-MIT-disclaimer-rev",
|
| 355 |
+
"HPND-UC",
|
| 356 |
+
"HPND-UC-export-US",
|
| 357 |
+
"HTMLTIDY",
|
| 358 |
+
"IBM-pibs",
|
| 359 |
+
"ICU",
|
| 360 |
+
"IEC-Code-Components-EULA",
|
| 361 |
+
"IJG",
|
| 362 |
+
"IJG-short",
|
| 363 |
+
"ImageMagick",
|
| 364 |
+
"iMatix",
|
| 365 |
+
"Imlib2",
|
| 366 |
+
"Info-ZIP",
|
| 367 |
+
"Inner-Net-2.0",
|
| 368 |
+
"InnoSetup",
|
| 369 |
+
"Intel",
|
| 370 |
+
"Intel-ACPI",
|
| 371 |
+
"Interbase-1.0",
|
| 372 |
+
"IPA",
|
| 373 |
+
"IPL-1.0",
|
| 374 |
+
"ISC",
|
| 375 |
+
"ISC-Veillard",
|
| 376 |
+
"Jam",
|
| 377 |
+
"JasPer-2.0",
|
| 378 |
+
"jove",
|
| 379 |
+
"JPL-image",
|
| 380 |
+
"JPNIC",
|
| 381 |
+
"JSON",
|
| 382 |
+
"Kastrup",
|
| 383 |
+
"Kazlib",
|
| 384 |
+
"Knuth-CTAN",
|
| 385 |
+
"LAL-1.2",
|
| 386 |
+
"LAL-1.3",
|
| 387 |
+
"Latex2e",
|
| 388 |
+
"Latex2e-translated-notice",
|
| 389 |
+
"Leptonica",
|
| 390 |
+
"LGPL-2.0",
|
| 391 |
+
"LGPL-2.0+",
|
| 392 |
+
"LGPL-2.0-only",
|
| 393 |
+
"LGPL-2.0-or-later",
|
| 394 |
+
"LGPL-2.1",
|
| 395 |
+
"LGPL-2.1+",
|
| 396 |
+
"LGPL-2.1-only",
|
| 397 |
+
"LGPL-2.1-or-later",
|
| 398 |
+
"LGPL-3.0",
|
| 399 |
+
"LGPL-3.0+",
|
| 400 |
+
"LGPL-3.0-only",
|
| 401 |
+
"LGPL-3.0-or-later",
|
| 402 |
+
"LGPLLR",
|
| 403 |
+
"Libpng",
|
| 404 |
+
"libpng-1.6.35",
|
| 405 |
+
"libpng-2.0",
|
| 406 |
+
"libselinux-1.0",
|
| 407 |
+
"libtiff",
|
| 408 |
+
"libutil-David-Nugent",
|
| 409 |
+
"LiLiQ-P-1.1",
|
| 410 |
+
"LiLiQ-R-1.1",
|
| 411 |
+
"LiLiQ-Rplus-1.1",
|
| 412 |
+
"Linux-man-pages-1-para",
|
| 413 |
+
"Linux-man-pages-copyleft",
|
| 414 |
+
"Linux-man-pages-copyleft-2-para",
|
| 415 |
+
"Linux-man-pages-copyleft-var",
|
| 416 |
+
"Linux-OpenIB",
|
| 417 |
+
"LOOP",
|
| 418 |
+
"LPD-document",
|
| 419 |
+
"LPL-1.0",
|
| 420 |
+
"LPL-1.02",
|
| 421 |
+
"LPPL-1.0",
|
| 422 |
+
"LPPL-1.1",
|
| 423 |
+
"LPPL-1.2",
|
| 424 |
+
"LPPL-1.3a",
|
| 425 |
+
"LPPL-1.3c",
|
| 426 |
+
"lsof",
|
| 427 |
+
"Lucida-Bitmap-Fonts",
|
| 428 |
+
"LZMA-SDK-9.11-to-9.20",
|
| 429 |
+
"LZMA-SDK-9.22",
|
| 430 |
+
"Mackerras-3-Clause",
|
| 431 |
+
"Mackerras-3-Clause-acknowledgment",
|
| 432 |
+
"magaz",
|
| 433 |
+
"mailprio",
|
| 434 |
+
"MakeIndex",
|
| 435 |
+
"man2html",
|
| 436 |
+
"Martin-Birgmeier",
|
| 437 |
+
"McPhee-slideshow",
|
| 438 |
+
"metamail",
|
| 439 |
+
"Minpack",
|
| 440 |
+
"MIPS",
|
| 441 |
+
"MirOS",
|
| 442 |
+
"MIT",
|
| 443 |
+
"MIT-0",
|
| 444 |
+
"MIT-advertising",
|
| 445 |
+
"MIT-Click",
|
| 446 |
+
"MIT-CMU",
|
| 447 |
+
"MIT-enna",
|
| 448 |
+
"MIT-feh",
|
| 449 |
+
"MIT-Festival",
|
| 450 |
+
"MIT-Khronos-old",
|
| 451 |
+
"MIT-Modern-Variant",
|
| 452 |
+
"MIT-open-group",
|
| 453 |
+
"MIT-testregex",
|
| 454 |
+
"MIT-Wu",
|
| 455 |
+
"MITNFA",
|
| 456 |
+
"MMIXware",
|
| 457 |
+
"Motosoto",
|
| 458 |
+
"MPEG-SSG",
|
| 459 |
+
"mpi-permissive",
|
| 460 |
+
"mpich2",
|
| 461 |
+
"MPL-1.0",
|
| 462 |
+
"MPL-1.1",
|
| 463 |
+
"MPL-2.0",
|
| 464 |
+
"MPL-2.0-no-copyleft-exception",
|
| 465 |
+
"mplus",
|
| 466 |
+
"MS-LPL",
|
| 467 |
+
"MS-PL",
|
| 468 |
+
"MS-RL",
|
| 469 |
+
"MTLL",
|
| 470 |
+
"MulanPSL-1.0",
|
| 471 |
+
"MulanPSL-2.0",
|
| 472 |
+
"Multics",
|
| 473 |
+
"Mup",
|
| 474 |
+
"NAIST-2003",
|
| 475 |
+
"NASA-1.3",
|
| 476 |
+
"Naumen",
|
| 477 |
+
"NBPL-1.0",
|
| 478 |
+
"NCBI-PD",
|
| 479 |
+
"NCGL-UK-2.0",
|
| 480 |
+
"NCL",
|
| 481 |
+
"NCSA",
|
| 482 |
+
"Net-SNMP",
|
| 483 |
+
"NetCDF",
|
| 484 |
+
"Newsletr",
|
| 485 |
+
"NGPL",
|
| 486 |
+
"ngrep",
|
| 487 |
+
"NICTA-1.0",
|
| 488 |
+
"NIST-PD",
|
| 489 |
+
"NIST-PD-fallback",
|
| 490 |
+
"NIST-Software",
|
| 491 |
+
"NLOD-1.0",
|
| 492 |
+
"NLOD-2.0",
|
| 493 |
+
"NLPL",
|
| 494 |
+
"Nokia",
|
| 495 |
+
"NOSL",
|
| 496 |
+
"Noweb",
|
| 497 |
+
"NPL-1.0",
|
| 498 |
+
"NPL-1.1",
|
| 499 |
+
"NPOSL-3.0",
|
| 500 |
+
"NRL",
|
| 501 |
+
"NTIA-PD",
|
| 502 |
+
"NTP",
|
| 503 |
+
"NTP-0",
|
| 504 |
+
"Nunit",
|
| 505 |
+
"O-UDA-1.0",
|
| 506 |
+
"OAR",
|
| 507 |
+
"OCCT-PL",
|
| 508 |
+
"OCLC-2.0",
|
| 509 |
+
"ODbL-1.0",
|
| 510 |
+
"ODC-By-1.0",
|
| 511 |
+
"OFFIS",
|
| 512 |
+
"OFL-1.0",
|
| 513 |
+
"OFL-1.0-no-RFN",
|
| 514 |
+
"OFL-1.0-RFN",
|
| 515 |
+
"OFL-1.1",
|
| 516 |
+
"OFL-1.1-no-RFN",
|
| 517 |
+
"OFL-1.1-RFN",
|
| 518 |
+
"OGC-1.0",
|
| 519 |
+
"OGDL-Taiwan-1.0",
|
| 520 |
+
"OGL-Canada-2.0",
|
| 521 |
+
"OGL-UK-1.0",
|
| 522 |
+
"OGL-UK-2.0",
|
| 523 |
+
"OGL-UK-3.0",
|
| 524 |
+
"OGTSL",
|
| 525 |
+
"OLDAP-1.1",
|
| 526 |
+
"OLDAP-1.2",
|
| 527 |
+
"OLDAP-1.3",
|
| 528 |
+
"OLDAP-1.4",
|
| 529 |
+
"OLDAP-2.0",
|
| 530 |
+
"OLDAP-2.0.1",
|
| 531 |
+
"OLDAP-2.1",
|
| 532 |
+
"OLDAP-2.2",
|
| 533 |
+
"OLDAP-2.2.1",
|
| 534 |
+
"OLDAP-2.2.2",
|
| 535 |
+
"OLDAP-2.3",
|
| 536 |
+
"OLDAP-2.4",
|
| 537 |
+
"OLDAP-2.5",
|
| 538 |
+
"OLDAP-2.6",
|
| 539 |
+
"OLDAP-2.7",
|
| 540 |
+
"OLDAP-2.8",
|
| 541 |
+
"OLFL-1.3",
|
| 542 |
+
"OML",
|
| 543 |
+
"OpenPBS-2.3",
|
| 544 |
+
"OpenSSL",
|
| 545 |
+
"OpenSSL-standalone",
|
| 546 |
+
"OpenVision",
|
| 547 |
+
"OPL-1.0",
|
| 548 |
+
"OPL-UK-3.0",
|
| 549 |
+
"OPUBL-1.0",
|
| 550 |
+
"OSET-PL-2.1",
|
| 551 |
+
"OSL-1.0",
|
| 552 |
+
"OSL-1.1",
|
| 553 |
+
"OSL-2.0",
|
| 554 |
+
"OSL-2.1",
|
| 555 |
+
"OSL-3.0",
|
| 556 |
+
"PADL",
|
| 557 |
+
"Parity-6.0.0",
|
| 558 |
+
"Parity-7.0.0",
|
| 559 |
+
"PDDL-1.0",
|
| 560 |
+
"PHP-3.0",
|
| 561 |
+
"PHP-3.01",
|
| 562 |
+
"Pixar",
|
| 563 |
+
"pkgconf",
|
| 564 |
+
"Plexus",
|
| 565 |
+
"pnmstitch",
|
| 566 |
+
"PolyForm-Noncommercial-1.0.0",
|
| 567 |
+
"PolyForm-Small-Business-1.0.0",
|
| 568 |
+
"PostgreSQL",
|
| 569 |
+
"PPL",
|
| 570 |
+
"PSF-2.0",
|
| 571 |
+
"psfrag",
|
| 572 |
+
"psutils",
|
| 573 |
+
"Python-2.0",
|
| 574 |
+
"Python-2.0.1",
|
| 575 |
+
"python-ldap",
|
| 576 |
+
"Qhull",
|
| 577 |
+
"QPL-1.0",
|
| 578 |
+
"QPL-1.0-INRIA-2004",
|
| 579 |
+
"radvd",
|
| 580 |
+
"Rdisc",
|
| 581 |
+
"RHeCos-1.1",
|
| 582 |
+
"RPL-1.1",
|
| 583 |
+
"RPL-1.5",
|
| 584 |
+
"RPSL-1.0",
|
| 585 |
+
"RSA-MD",
|
| 586 |
+
"RSCPL",
|
| 587 |
+
"Ruby",
|
| 588 |
+
"Ruby-pty",
|
| 589 |
+
"SAX-PD",
|
| 590 |
+
"SAX-PD-2.0",
|
| 591 |
+
"Saxpath",
|
| 592 |
+
"SCEA",
|
| 593 |
+
"SchemeReport",
|
| 594 |
+
"Sendmail",
|
| 595 |
+
"Sendmail-8.23",
|
| 596 |
+
"Sendmail-Open-Source-1.1",
|
| 597 |
+
"SGI-B-1.0",
|
| 598 |
+
"SGI-B-1.1",
|
| 599 |
+
"SGI-B-2.0",
|
| 600 |
+
"SGI-OpenGL",
|
| 601 |
+
"SGP4",
|
| 602 |
+
"SHL-0.5",
|
| 603 |
+
"SHL-0.51",
|
| 604 |
+
"SimPL-2.0",
|
| 605 |
+
"SISSL",
|
| 606 |
+
"SISSL-1.2",
|
| 607 |
+
"SL",
|
| 608 |
+
"Sleepycat",
|
| 609 |
+
"SMAIL-GPL",
|
| 610 |
+
"SMLNJ",
|
| 611 |
+
"SMPPL",
|
| 612 |
+
"SNIA",
|
| 613 |
+
"snprintf",
|
| 614 |
+
"SOFA",
|
| 615 |
+
"softSurfer",
|
| 616 |
+
"Soundex",
|
| 617 |
+
"Spencer-86",
|
| 618 |
+
"Spencer-94",
|
| 619 |
+
"Spencer-99",
|
| 620 |
+
"SPL-1.0",
|
| 621 |
+
"ssh-keyscan",
|
| 622 |
+
"SSH-OpenSSH",
|
| 623 |
+
"SSH-short",
|
| 624 |
+
"SSLeay-standalone",
|
| 625 |
+
"SSPL-1.0",
|
| 626 |
+
"StandardML-NJ",
|
| 627 |
+
"SugarCRM-1.1.3",
|
| 628 |
+
"SUL-1.0",
|
| 629 |
+
"Sun-PPP",
|
| 630 |
+
"Sun-PPP-2000",
|
| 631 |
+
"SunPro",
|
| 632 |
+
"SWL",
|
| 633 |
+
"swrule",
|
| 634 |
+
"Symlinks",
|
| 635 |
+
"TAPR-OHL-1.0",
|
| 636 |
+
"TCL",
|
| 637 |
+
"TCP-wrappers",
|
| 638 |
+
"TermReadKey",
|
| 639 |
+
"TGPPL-1.0",
|
| 640 |
+
"ThirdEye",
|
| 641 |
+
"threeparttable",
|
| 642 |
+
"TMate",
|
| 643 |
+
"TORQUE-1.1",
|
| 644 |
+
"TOSL",
|
| 645 |
+
"TPDL",
|
| 646 |
+
"TPL-1.0",
|
| 647 |
+
"TrustedQSL",
|
| 648 |
+
"TTWL",
|
| 649 |
+
"TTYP0",
|
| 650 |
+
"TU-Berlin-1.0",
|
| 651 |
+
"TU-Berlin-2.0",
|
| 652 |
+
"Ubuntu-font-1.0",
|
| 653 |
+
"UCAR",
|
| 654 |
+
"UCL-1.0",
|
| 655 |
+
"ulem",
|
| 656 |
+
"UMich-Merit",
|
| 657 |
+
"Unicode-3.0",
|
| 658 |
+
"Unicode-DFS-2015",
|
| 659 |
+
"Unicode-DFS-2016",
|
| 660 |
+
"Unicode-TOU",
|
| 661 |
+
"UnixCrypt",
|
| 662 |
+
"Unlicense",
|
| 663 |
+
"Unlicense-libtelnet",
|
| 664 |
+
"Unlicense-libwhirlpool",
|
| 665 |
+
"UPL-1.0",
|
| 666 |
+
"URT-RLE",
|
| 667 |
+
"Vim",
|
| 668 |
+
"VOSTROM",
|
| 669 |
+
"VSL-1.0",
|
| 670 |
+
"W3C",
|
| 671 |
+
"W3C-19980720",
|
| 672 |
+
"W3C-20150513",
|
| 673 |
+
"w3m",
|
| 674 |
+
"Watcom-1.0",
|
| 675 |
+
"Widget-Workshop",
|
| 676 |
+
"Wsuipa",
|
| 677 |
+
"WTFPL",
|
| 678 |
+
"wwl",
|
| 679 |
+
"wxWindows",
|
| 680 |
+
"X11",
|
| 681 |
+
"X11-distribute-modifications-variant",
|
| 682 |
+
"X11-swapped",
|
| 683 |
+
"Xdebug-1.03",
|
| 684 |
+
"Xerox",
|
| 685 |
+
"Xfig",
|
| 686 |
+
"XFree86-1.1",
|
| 687 |
+
"xinetd",
|
| 688 |
+
"xkeyboard-config-Zinoviev",
|
| 689 |
+
"xlock",
|
| 690 |
+
"Xnet",
|
| 691 |
+
"xpp",
|
| 692 |
+
"XSkat",
|
| 693 |
+
"xzoom",
|
| 694 |
+
"YPL-1.0",
|
| 695 |
+
"YPL-1.1",
|
| 696 |
+
"Zed",
|
| 697 |
+
"Zeeff",
|
| 698 |
+
"Zend-2.0",
|
| 699 |
+
"Zimbra-1.3",
|
| 700 |
+
"Zimbra-1.4",
|
| 701 |
+
"Zlib",
|
| 702 |
+
"zlib-acknowledgement",
|
| 703 |
+
"ZPL-1.1",
|
| 704 |
+
"ZPL-2.0",
|
| 705 |
+
"ZPL-2.1",
|
| 706 |
+
"389-exception",
|
| 707 |
+
"Asterisk-exception",
|
| 708 |
+
"Asterisk-linking-protocols-exception",
|
| 709 |
+
"Autoconf-exception-2.0",
|
| 710 |
+
"Autoconf-exception-3.0",
|
| 711 |
+
"Autoconf-exception-generic",
|
| 712 |
+
"Autoconf-exception-generic-3.0",
|
| 713 |
+
"Autoconf-exception-macro",
|
| 714 |
+
"Bison-exception-1.24",
|
| 715 |
+
"Bison-exception-2.2",
|
| 716 |
+
"Bootloader-exception",
|
| 717 |
+
"CGAL-linking-exception",
|
| 718 |
+
"Classpath-exception-2.0",
|
| 719 |
+
"CLISP-exception-2.0",
|
| 720 |
+
"cryptsetup-OpenSSL-exception",
|
| 721 |
+
"Digia-Qt-LGPL-exception-1.1",
|
| 722 |
+
"DigiRule-FOSS-exception",
|
| 723 |
+
"eCos-exception-2.0",
|
| 724 |
+
"erlang-otp-linking-exception",
|
| 725 |
+
"Fawkes-Runtime-exception",
|
| 726 |
+
"FLTK-exception",
|
| 727 |
+
"fmt-exception",
|
| 728 |
+
"Font-exception-2.0",
|
| 729 |
+
"freertos-exception-2.0",
|
| 730 |
+
"GCC-exception-2.0",
|
| 731 |
+
"GCC-exception-2.0-note",
|
| 732 |
+
"GCC-exception-3.1",
|
| 733 |
+
"Gmsh-exception",
|
| 734 |
+
"GNAT-exception",
|
| 735 |
+
"GNOME-examples-exception",
|
| 736 |
+
"GNU-compiler-exception",
|
| 737 |
+
"gnu-javamail-exception",
|
| 738 |
+
"GPL-3.0-389-ds-base-exception",
|
| 739 |
+
"GPL-3.0-interface-exception",
|
| 740 |
+
"GPL-3.0-linking-exception",
|
| 741 |
+
"GPL-3.0-linking-source-exception",
|
| 742 |
+
"GPL-CC-1.0",
|
| 743 |
+
"GStreamer-exception-2005",
|
| 744 |
+
"GStreamer-exception-2008",
|
| 745 |
+
"harbour-exception",
|
| 746 |
+
"i2p-gpl-java-exception",
|
| 747 |
+
"Independent-modules-exception",
|
| 748 |
+
"KiCad-libraries-exception",
|
| 749 |
+
"LGPL-3.0-linking-exception",
|
| 750 |
+
"libpri-OpenH323-exception",
|
| 751 |
+
"Libtool-exception",
|
| 752 |
+
"Linux-syscall-note",
|
| 753 |
+
"LLGPL",
|
| 754 |
+
"LLVM-exception",
|
| 755 |
+
"LZMA-exception",
|
| 756 |
+
"mif-exception",
|
| 757 |
+
"mxml-exception",
|
| 758 |
+
"Nokia-Qt-exception-1.1",
|
| 759 |
+
"OCaml-LGPL-linking-exception",
|
| 760 |
+
"OCCT-exception-1.0",
|
| 761 |
+
"OpenJDK-assembly-exception-1.0",
|
| 762 |
+
"openvpn-openssl-exception",
|
| 763 |
+
"PCRE2-exception",
|
| 764 |
+
"polyparse-exception",
|
| 765 |
+
"PS-or-PDF-font-exception-20170817",
|
| 766 |
+
"QPL-1.0-INRIA-2004-exception",
|
| 767 |
+
"Qt-GPL-exception-1.0",
|
| 768 |
+
"Qt-LGPL-exception-1.1",
|
| 769 |
+
"Qwt-exception-1.0",
|
| 770 |
+
"romic-exception",
|
| 771 |
+
"RRDtool-FLOSS-exception-2.0",
|
| 772 |
+
"SANE-exception",
|
| 773 |
+
"SHL-2.0",
|
| 774 |
+
"SHL-2.1",
|
| 775 |
+
"stunnel-exception",
|
| 776 |
+
"SWI-exception",
|
| 777 |
+
"Swift-exception",
|
| 778 |
+
"Texinfo-exception",
|
| 779 |
+
"u-boot-exception-2.0",
|
| 780 |
+
"UBDL-exception",
|
| 781 |
+
"Universal-FOSS-exception-1.0",
|
| 782 |
+
"vsftpd-openssl-exception",
|
| 783 |
+
"WxWindows-exception-3.1",
|
| 784 |
+
"x11vnc-openssl-exception"
|
| 785 |
+
]
|
| 786 |
+
}
|
src/static/css/style.css
ADDED
|
@@ -0,0 +1,1288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600;700&display=swap');
|
| 2 |
+
|
| 3 |
+
/* Base & Common */
|
| 4 |
+
body {
|
| 5 |
+
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
|
| 6 |
+
margin: 0;
|
| 7 |
+
padding: 0;
|
| 8 |
+
line-height: 1.6;
|
| 9 |
+
color: #333;
|
| 10 |
+
background-color: #f9f9f9;
|
| 11 |
+
}
|
| 12 |
+
|
| 13 |
+
h1,
|
| 14 |
+
h2,
|
| 15 |
+
h3,
|
| 16 |
+
h4,
|
| 17 |
+
h5,
|
| 18 |
+
h6 {
|
| 19 |
+
font-family: 'Poppins', sans-serif;
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
h1 {
|
| 23 |
+
font-weight: 700;
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
h2 {
|
| 27 |
+
font-weight: 600;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
h3 {
|
| 31 |
+
font-weight: 600;
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
h4 {
|
| 35 |
+
font-weight: 500;
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
h5 {
|
| 39 |
+
font-weight: 500;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
h6 {
|
| 43 |
+
font-weight: 400;
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
.container {
|
| 47 |
+
max-width: 1000px;
|
| 48 |
+
margin: 0 auto;
|
| 49 |
+
padding: 0 20px;
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
code {
|
| 53 |
+
background-color: #f8f9fa;
|
| 54 |
+
padding: 2px 5px;
|
| 55 |
+
border-radius: 4px;
|
| 56 |
+
font-family: monospace;
|
| 57 |
+
font-size: 14px;
|
| 58 |
+
color: #e74c3c;
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
a {
|
| 62 |
+
color: #3498db;
|
| 63 |
+
text-decoration: none;
|
| 64 |
+
transition: color 0.3s;
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
a:hover {
|
| 68 |
+
color: #2980b9;
|
| 69 |
+
text-decoration: underline;
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
/* Header */
|
| 73 |
+
.header {
|
| 74 |
+
position: relative;
|
| 75 |
+
background-color: #ffffff;
|
| 76 |
+
padding: 15px 20px;
|
| 77 |
+
border-bottom: 1px solid #e9ecef;
|
| 78 |
+
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.05);
|
| 79 |
+
display: flex;
|
| 80 |
+
align-items: center;
|
| 81 |
+
justify-content: space-between;
|
| 82 |
+
margin-bottom: 30px;
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
.header-left {
|
| 86 |
+
display: flex;
|
| 87 |
+
align-items: center;
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
.header img {
|
| 91 |
+
height: 60px;
|
| 92 |
+
margin-right: 15px;
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
.header-content {
|
| 96 |
+
position: absolute;
|
| 97 |
+
left: 50%;
|
| 98 |
+
transform: translateX(-50%);
|
| 99 |
+
display: flex;
|
| 100 |
+
flex-direction: column;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
.header h1 {
|
| 104 |
+
margin: 0;
|
| 105 |
+
font-family: 'Poppins', sans-serif;
|
| 106 |
+
font-size: 28px;
|
| 107 |
+
color: #2c3e50;
|
| 108 |
+
font-weight: 700;
|
| 109 |
+
margin-top: 5px;
|
| 110 |
+
/* Adjusting down to align with logo */
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
.header-right {
|
| 114 |
+
display: flex;
|
| 115 |
+
gap: 10px;
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
/* Buttons */
|
| 119 |
+
button {
|
| 120 |
+
padding: 12px 20px;
|
| 121 |
+
background-color: #3498db;
|
| 122 |
+
color: white;
|
| 123 |
+
border: none;
|
| 124 |
+
border-radius: 6px;
|
| 125 |
+
cursor: pointer;
|
| 126 |
+
font-size: 15px;
|
| 127 |
+
font-weight: 500;
|
| 128 |
+
transition: background-color 0.3s;
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
button:hover {
|
| 132 |
+
background-color: #2980b9;
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
button:disabled {
|
| 136 |
+
background-color: #bdc3c7;
|
| 137 |
+
cursor: not-allowed;
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
.button {
|
| 141 |
+
display: inline-block;
|
| 142 |
+
padding: 12px 20px;
|
| 143 |
+
background-color: #7f8c8d;
|
| 144 |
+
color: white;
|
| 145 |
+
border: none;
|
| 146 |
+
border-radius: 6px;
|
| 147 |
+
cursor: pointer;
|
| 148 |
+
font-size: 15px;
|
| 149 |
+
font-weight: 500;
|
| 150 |
+
text-decoration: none;
|
| 151 |
+
transition: background-color 0.3s;
|
| 152 |
+
margin-bottom: 20px;
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
.button:hover {
|
| 156 |
+
background-color: #95a5a6;
|
| 157 |
+
text-decoration: none;
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
.github-button {
|
| 161 |
+
display: inline-block;
|
| 162 |
+
padding: 12px 20px;
|
| 163 |
+
background-color: #3498db;
|
| 164 |
+
color: white;
|
| 165 |
+
text-decoration: none;
|
| 166 |
+
border-radius: 6px;
|
| 167 |
+
font-weight: 500;
|
| 168 |
+
font-size: 15px;
|
| 169 |
+
transition: background-color 0.3s;
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
.github-button:hover {
|
| 173 |
+
background-color: #2980b9;
|
| 174 |
+
color: white;
|
| 175 |
+
text-decoration: none;
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
.generate-another-btn {
|
| 179 |
+
display: inline-flex;
|
| 180 |
+
align-items: center;
|
| 181 |
+
justify-content: center;
|
| 182 |
+
padding: 0 16px;
|
| 183 |
+
height: 38px;
|
| 184 |
+
background: linear-gradient(135deg, rgb(66, 92, 187), rgb(116, 142, 237));
|
| 185 |
+
color: #ffffff !important;
|
| 186 |
+
font-weight: 600;
|
| 187 |
+
border-radius: 19px;
|
| 188 |
+
font-size: 14px;
|
| 189 |
+
transition: all 0.3s ease;
|
| 190 |
+
text-decoration: none !important;
|
| 191 |
+
gap: 8px;
|
| 192 |
+
font-family: inherit;
|
| 193 |
+
cursor: pointer;
|
| 194 |
+
border: none;
|
| 195 |
+
box-shadow: 0 2px 4px rgba(66, 92, 187, 0.3);
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
.generate-another-btn:hover {
|
| 199 |
+
background: linear-gradient(135deg, rgb(86, 112, 207), rgb(136, 162, 255));
|
| 200 |
+
color: #ffffff !important;
|
| 201 |
+
transform: translateY(-2px);
|
| 202 |
+
box-shadow: 0 4px 8px rgba(66, 92, 187, 0.4);
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
/* Content Sections */
|
| 206 |
+
.content-section {
|
| 207 |
+
background-color: #ffffff;
|
| 208 |
+
border-radius: 8px;
|
| 209 |
+
padding: 25px;
|
| 210 |
+
margin-bottom: 30px;
|
| 211 |
+
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.05);
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
.content-section:last-child {
|
| 215 |
+
margin-bottom: 0;
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
.content-section h2 {
|
| 219 |
+
color: #2c3e50;
|
| 220 |
+
margin-top: 0;
|
| 221 |
+
margin-bottom: 20px;
|
| 222 |
+
font-size: 22px;
|
| 223 |
+
border-bottom: 2px solid #f0f0f0;
|
| 224 |
+
padding-bottom: 10px;
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
.content-section h3 {
|
| 228 |
+
color: #2c3e50;
|
| 229 |
+
margin-top: 0;
|
| 230 |
+
margin-bottom: 15px;
|
| 231 |
+
font-size: 20px;
|
| 232 |
+
/* result.html has 20px, index 18px */
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
.content-section p {
|
| 236 |
+
margin-bottom: 20px;
|
| 237 |
+
font-size: 16px;
|
| 238 |
+
line-height: 1.7;
|
| 239 |
+
color: #555;
|
| 240 |
+
}
|
| 241 |
+
|
| 242 |
+
/* Forms (from index.html) */
|
| 243 |
+
.form-section {
|
| 244 |
+
background-color: #ffffff;
|
| 245 |
+
border-radius: 8px;
|
| 246 |
+
padding: 25px;
|
| 247 |
+
margin-bottom: 30px;
|
| 248 |
+
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.05);
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
.form-section p {
|
| 252 |
+
margin-bottom: 20px;
|
| 253 |
+
font-size: 16px;
|
| 254 |
+
color: #555;
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
form {
|
| 258 |
+
margin: 20px 0;
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
input[type="text"] {
|
| 262 |
+
padding: 12px;
|
| 263 |
+
border: 1px solid #ddd;
|
| 264 |
+
border-radius: 6px;
|
| 265 |
+
margin-right: 10px;
|
| 266 |
+
width: 350px;
|
| 267 |
+
font-size: 15px;
|
| 268 |
+
transition: border-color 0.3s;
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
input[type="text"]:focus {
|
| 272 |
+
border-color: #3498db;
|
| 273 |
+
outline: none;
|
| 274 |
+
box-shadow: 0 0 5px rgba(52, 152, 219, 0.3);
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
/* Result Specific Modules */
|
| 278 |
+
.success-message {
|
| 279 |
+
text-align: left;
|
| 280 |
+
padding: 15px;
|
| 281 |
+
background-color: #d4edda;
|
| 282 |
+
border: 1px solid #c3e6cb;
|
| 283 |
+
border-radius: 8px;
|
| 284 |
+
margin-bottom: 20px;
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
.success-message h2 {
|
| 288 |
+
margin: 0;
|
| 289 |
+
font-size: 18px;
|
| 290 |
+
color: #155724;
|
| 291 |
+
font-weight: 500;
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
.model-name {
|
| 295 |
+
font-weight: 600;
|
| 296 |
+
color: #2c3e50;
|
| 297 |
+
}
|
| 298 |
+
|
| 299 |
+
.aibom-viewer {
|
| 300 |
+
margin: 20px 0;
|
| 301 |
+
border: 1px solid #e9ecef;
|
| 302 |
+
border-radius: 8px;
|
| 303 |
+
padding: 20px;
|
| 304 |
+
background-color: #f9f9f9;
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
.aibom-section {
|
| 308 |
+
margin-bottom: 20px;
|
| 309 |
+
padding: 20px;
|
| 310 |
+
border-radius: 8px;
|
| 311 |
+
background-color: white;
|
| 312 |
+
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.05);
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
.aibom-section:last-child {
|
| 316 |
+
margin-bottom: 0;
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
.aibom-section h4 {
|
| 320 |
+
margin-top: 0;
|
| 321 |
+
color: #2c3e50;
|
| 322 |
+
border-bottom: 2px solid #f0f0f0;
|
| 323 |
+
padding-bottom: 10px;
|
| 324 |
+
margin-bottom: 15px;
|
| 325 |
+
font-size: 18px;
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
.aibom-property {
|
| 329 |
+
display: flex;
|
| 330 |
+
margin: 10px 0;
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
.aibom-property:last-child {
|
| 334 |
+
margin-bottom: 0;
|
| 335 |
+
}
|
| 336 |
+
|
| 337 |
+
.property-name {
|
| 338 |
+
font-weight: bold;
|
| 339 |
+
width: 200px;
|
| 340 |
+
color: #34495e;
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
.property-value {
|
| 344 |
+
flex: 1;
|
| 345 |
+
color: #555;
|
| 346 |
+
line-height: 1.6;
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
.aibom-tabs {
|
| 350 |
+
display: flex;
|
| 351 |
+
border-bottom: 1px solid #e9ecef;
|
| 352 |
+
margin-bottom: 20px;
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
.aibom-tab {
|
| 356 |
+
padding: 12px 20px;
|
| 357 |
+
cursor: pointer;
|
| 358 |
+
background-color: #f8f9fa;
|
| 359 |
+
margin-right: 5px;
|
| 360 |
+
border-radius: 8px 8px 0 0;
|
| 361 |
+
font-weight: 500;
|
| 362 |
+
transition: all 0.3s ease;
|
| 363 |
+
}
|
| 364 |
+
|
| 365 |
+
.aibom-tab.active {
|
| 366 |
+
background-color: #6c7a89;
|
| 367 |
+
color: white;
|
| 368 |
+
}
|
| 369 |
+
|
| 370 |
+
.aibom-tab:hover:not(.active) {
|
| 371 |
+
background-color: #e9ecef;
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
.tab-content {
|
| 375 |
+
display: none;
|
| 376 |
+
}
|
| 377 |
+
|
| 378 |
+
.tab-content.active {
|
| 379 |
+
display: block;
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
.json-view {
|
| 383 |
+
background-color: #f8f9fa;
|
| 384 |
+
border: 1px solid #e9ecef;
|
| 385 |
+
border-radius: 8px;
|
| 386 |
+
padding: 20px;
|
| 387 |
+
overflow: auto;
|
| 388 |
+
max-height: 500px;
|
| 389 |
+
font-family: monospace;
|
| 390 |
+
line-height: 1.5;
|
| 391 |
+
}
|
| 392 |
+
|
| 393 |
+
.collapsible {
|
| 394 |
+
cursor: pointer;
|
| 395 |
+
position: relative;
|
| 396 |
+
transition: all 0.3s ease;
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
.collapsible:after {
|
| 400 |
+
content: '+';
|
| 401 |
+
position: absolute;
|
| 402 |
+
right: 10px;
|
| 403 |
+
font-weight: bold;
|
| 404 |
+
}
|
| 405 |
+
|
| 406 |
+
.collapsible.active:after {
|
| 407 |
+
content: '-';
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
.collapsible-content {
|
| 411 |
+
max-height: 0;
|
| 412 |
+
overflow: hidden;
|
| 413 |
+
transition: max-height 0.3s ease-out;
|
| 414 |
+
}
|
| 415 |
+
|
| 416 |
+
.collapsible-content.active {
|
| 417 |
+
max-height: 500px;
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
.tag {
|
| 421 |
+
display: inline-block;
|
| 422 |
+
background-color: #e9ecef;
|
| 423 |
+
padding: 4px 10px;
|
| 424 |
+
border-radius: 16px;
|
| 425 |
+
margin: 3px;
|
| 426 |
+
font-size: 0.9em;
|
| 427 |
+
}
|
| 428 |
+
|
| 429 |
+
.key-info {
|
| 430 |
+
background-color: #e3f2fd;
|
| 431 |
+
border-left: 4px solid #2196F3;
|
| 432 |
+
padding: 20px;
|
| 433 |
+
margin-bottom: 20px;
|
| 434 |
+
border-radius: 8px;
|
| 435 |
+
}
|
| 436 |
+
|
| 437 |
+
.key-info h3,
|
| 438 |
+
.completeness-profile h3 {
|
| 439 |
+
margin-top: 0;
|
| 440 |
+
margin-bottom: 15px;
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
/* Tables & Scoring */
|
| 444 |
+
table {
|
| 445 |
+
border-collapse: collapse;
|
| 446 |
+
width: 100%;
|
| 447 |
+
margin-top: 15px;
|
| 448 |
+
margin-bottom: 20px;
|
| 449 |
+
}
|
| 450 |
+
|
| 451 |
+
th,
|
| 452 |
+
td {
|
| 453 |
+
border: 1px solid #e9ecef;
|
| 454 |
+
padding: 12px;
|
| 455 |
+
}
|
| 456 |
+
|
| 457 |
+
th {
|
| 458 |
+
background-color: #f8f9fa;
|
| 459 |
+
color: #2c3e50;
|
| 460 |
+
font-weight: 600;
|
| 461 |
+
}
|
| 462 |
+
|
| 463 |
+
.check-mark {
|
| 464 |
+
color: #27ae60;
|
| 465 |
+
}
|
| 466 |
+
|
| 467 |
+
.x-mark {
|
| 468 |
+
color: #e74c3c;
|
| 469 |
+
}
|
| 470 |
+
|
| 471 |
+
.field-name {
|
| 472 |
+
color: #000;
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
.field-stars {
|
| 476 |
+
color: #000;
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
.improvement {
|
| 480 |
+
color: #2c3e50;
|
| 481 |
+
background-color: #ecf0f1;
|
| 482 |
+
padding: 20px;
|
| 483 |
+
border-radius: 8px;
|
| 484 |
+
margin-bottom: 30px;
|
| 485 |
+
border-left: 4px solid #3498db;
|
| 486 |
+
}
|
| 487 |
+
|
| 488 |
+
.improvement-value {
|
| 489 |
+
color: #27ae60;
|
| 490 |
+
font-weight: bold;
|
| 491 |
+
}
|
| 492 |
+
|
| 493 |
+
.ai-badge {
|
| 494 |
+
background-color: #3498db;
|
| 495 |
+
color: white;
|
| 496 |
+
padding: 3px 8px;
|
| 497 |
+
border-radius: 3px;
|
| 498 |
+
font-size: 0.8em;
|
| 499 |
+
margin-left: 10px;
|
| 500 |
+
}
|
| 501 |
+
|
| 502 |
+
/* Progress Bars */
|
| 503 |
+
.progress-container {
|
| 504 |
+
width: 100%;
|
| 505 |
+
background-color: #f1f1f1;
|
| 506 |
+
border-radius: 8px;
|
| 507 |
+
margin: 8px 0;
|
| 508 |
+
overflow: hidden;
|
| 509 |
+
}
|
| 510 |
+
|
| 511 |
+
.progress-bar {
|
| 512 |
+
height: 24px;
|
| 513 |
+
border-radius: 8px;
|
| 514 |
+
text-align: center;
|
| 515 |
+
line-height: 24px;
|
| 516 |
+
color: white;
|
| 517 |
+
font-size: 14px;
|
| 518 |
+
font-weight: 500;
|
| 519 |
+
display: flex;
|
| 520 |
+
align-items: center;
|
| 521 |
+
justify-content: center;
|
| 522 |
+
transition: width 0.5s ease;
|
| 523 |
+
}
|
| 524 |
+
|
| 525 |
+
.progress-excellent {
|
| 526 |
+
background-color: #4CAF50;
|
| 527 |
+
}
|
| 528 |
+
|
| 529 |
+
.progress-good {
|
| 530 |
+
background-color: #2196F3;
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
.progress-fair {
|
| 534 |
+
background-color: #FF9800;
|
| 535 |
+
}
|
| 536 |
+
|
| 537 |
+
.progress-poor {
|
| 538 |
+
background-color: #f44336;
|
| 539 |
+
}
|
| 540 |
+
|
| 541 |
+
.progress-excellent-border {
|
| 542 |
+
border-left-color: #4CAF50 !important;
|
| 543 |
+
}
|
| 544 |
+
|
| 545 |
+
.progress-good-border {
|
| 546 |
+
border-left-color: #2196F3 !important;
|
| 547 |
+
}
|
| 548 |
+
|
| 549 |
+
.progress-fair-border {
|
| 550 |
+
border-left-color: #FF9800 !important;
|
| 551 |
+
}
|
| 552 |
+
|
| 553 |
+
.progress-poor-border {
|
| 554 |
+
border-left-color: #f44336 !important;
|
| 555 |
+
}
|
| 556 |
+
|
| 557 |
+
.score-table {
|
| 558 |
+
width: 100%;
|
| 559 |
+
margin-bottom: 20px;
|
| 560 |
+
}
|
| 561 |
+
|
| 562 |
+
.score-table th {
|
| 563 |
+
text-align: left;
|
| 564 |
+
padding: 12px;
|
| 565 |
+
background-color: #f8f9fa;
|
| 566 |
+
}
|
| 567 |
+
|
| 568 |
+
.score-table th:nth-child(1),
|
| 569 |
+
.score-table td:nth-child(1) {
|
| 570 |
+
width: 25%;
|
| 571 |
+
}
|
| 572 |
+
|
| 573 |
+
.score-table th:nth-child(2),
|
| 574 |
+
.score-table td:nth-child(2) {
|
| 575 |
+
width: 20%;
|
| 576 |
+
}
|
| 577 |
+
|
| 578 |
+
.score-table th:nth-child(3),
|
| 579 |
+
.score-table td:nth-child(3) {
|
| 580 |
+
width: 15%;
|
| 581 |
+
}
|
| 582 |
+
|
| 583 |
+
.score-table th:nth-child(4),
|
| 584 |
+
.score-table td:nth-child(4) {
|
| 585 |
+
width: 40%;
|
| 586 |
+
}
|
| 587 |
+
|
| 588 |
+
.score-weight {
|
| 589 |
+
font-size: 0.9em;
|
| 590 |
+
color: #666;
|
| 591 |
+
margin-left: 5px;
|
| 592 |
+
}
|
| 593 |
+
|
| 594 |
+
.score-label {
|
| 595 |
+
display: inline-block;
|
| 596 |
+
padding: 3px 8px;
|
| 597 |
+
border-radius: 4px;
|
| 598 |
+
color: white;
|
| 599 |
+
font-size: 0.9em;
|
| 600 |
+
margin-left: 5px;
|
| 601 |
+
background-color: transparent;
|
| 602 |
+
}
|
| 603 |
+
|
| 604 |
+
.total-score-container {
|
| 605 |
+
display: flex;
|
| 606 |
+
align-items: center;
|
| 607 |
+
margin-bottom: 25px;
|
| 608 |
+
background-color: white;
|
| 609 |
+
padding: 20px;
|
| 610 |
+
border-radius: 8px;
|
| 611 |
+
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.05);
|
| 612 |
+
}
|
| 613 |
+
|
| 614 |
+
.total-score {
|
| 615 |
+
font-size: 28px;
|
| 616 |
+
font-weight: bold;
|
| 617 |
+
margin-right: 20px;
|
| 618 |
+
color: #2c3e50;
|
| 619 |
+
}
|
| 620 |
+
|
| 621 |
+
.total-progress {
|
| 622 |
+
flex: 1;
|
| 623 |
+
}
|
| 624 |
+
|
| 625 |
+
.tooltip {
|
| 626 |
+
position: relative;
|
| 627 |
+
display: inline-block;
|
| 628 |
+
cursor: help;
|
| 629 |
+
}
|
| 630 |
+
|
| 631 |
+
.tooltip .tooltiptext {
|
| 632 |
+
visibility: hidden;
|
| 633 |
+
width: 300px;
|
| 634 |
+
background-color: #34495e;
|
| 635 |
+
color: #fff;
|
| 636 |
+
text-align: left;
|
| 637 |
+
border-radius: 6px;
|
| 638 |
+
padding: 12px;
|
| 639 |
+
position: absolute;
|
| 640 |
+
z-index: 1;
|
| 641 |
+
bottom: 125%;
|
| 642 |
+
left: 50%;
|
| 643 |
+
margin-left: -150px;
|
| 644 |
+
opacity: 0;
|
| 645 |
+
transition: opacity 0.3s;
|
| 646 |
+
font-size: 0.9em;
|
| 647 |
+
line-height: 1.5;
|
| 648 |
+
box-shadow: 0 5px 15px rgba(0, 0, 0, 0.1);
|
| 649 |
+
}
|
| 650 |
+
|
| 651 |
+
.tooltip:hover .tooltiptext {
|
| 652 |
+
visibility: visible;
|
| 653 |
+
opacity: 1;
|
| 654 |
+
}
|
| 655 |
+
|
| 656 |
+
.tooltip .tooltiptext::after {
|
| 657 |
+
content: "";
|
| 658 |
+
position: absolute;
|
| 659 |
+
top: 100%;
|
| 660 |
+
left: 50%;
|
| 661 |
+
margin-left: -5px;
|
| 662 |
+
border-width: 5px;
|
| 663 |
+
border-style: solid;
|
| 664 |
+
border-color: #34495e transparent transparent transparent;
|
| 665 |
+
}
|
| 666 |
+
|
| 667 |
+
.missing-fields {
|
| 668 |
+
background-color: #ffebee;
|
| 669 |
+
border-left: 4px solid #f44336;
|
| 670 |
+
padding: 20px;
|
| 671 |
+
margin: 20px 0;
|
| 672 |
+
border-radius: 8px;
|
| 673 |
+
}
|
| 674 |
+
|
| 675 |
+
.missing-fields h4 {
|
| 676 |
+
margin-top: 0;
|
| 677 |
+
color: #d32f2f;
|
| 678 |
+
margin-bottom: 15px;
|
| 679 |
+
}
|
| 680 |
+
|
| 681 |
+
.missing-fields ul {
|
| 682 |
+
margin-bottom: 0;
|
| 683 |
+
padding-left: 20px;
|
| 684 |
+
}
|
| 685 |
+
|
| 686 |
+
.recommendations {
|
| 687 |
+
background-color: #e8f5e9;
|
| 688 |
+
border-left: 4px solid #4caf50;
|
| 689 |
+
padding: 20px;
|
| 690 |
+
margin: 20px 0;
|
| 691 |
+
border-radius: 8px;
|
| 692 |
+
}
|
| 693 |
+
|
| 694 |
+
.recommendations h4 {
|
| 695 |
+
margin-top: 0;
|
| 696 |
+
color: #2e7d32;
|
| 697 |
+
margin-bottom: 15px;
|
| 698 |
+
}
|
| 699 |
+
|
| 700 |
+
.recommendations ul {
|
| 701 |
+
margin-bottom: 0;
|
| 702 |
+
padding-left: 20px;
|
| 703 |
+
}
|
| 704 |
+
|
| 705 |
+
.importance-indicator {
|
| 706 |
+
display: inline-block;
|
| 707 |
+
margin-left: 5px;
|
| 708 |
+
}
|
| 709 |
+
|
| 710 |
+
.high-importance {
|
| 711 |
+
color: #d32f2f;
|
| 712 |
+
}
|
| 713 |
+
|
| 714 |
+
.medium-importance {
|
| 715 |
+
color: #ff9800;
|
| 716 |
+
}
|
| 717 |
+
|
| 718 |
+
.low-importance {
|
| 719 |
+
color: #2196f3;
|
| 720 |
+
}
|
| 721 |
+
|
| 722 |
+
.scoring-rubric {
|
| 723 |
+
background-color: #e3f2fd;
|
| 724 |
+
border-left: 4px solid #2196f3;
|
| 725 |
+
padding: 20px;
|
| 726 |
+
margin: 20px 0;
|
| 727 |
+
border-radius: 8px;
|
| 728 |
+
}
|
| 729 |
+
|
| 730 |
+
/* Error Pages */
|
| 731 |
+
.error-message {
|
| 732 |
+
text-align: left;
|
| 733 |
+
padding: 15px;
|
| 734 |
+
background-color: #f8d7da;
|
| 735 |
+
border: 1px solid #f5c6cb;
|
| 736 |
+
border-radius: 8px;
|
| 737 |
+
margin-bottom: 20px;
|
| 738 |
+
}
|
| 739 |
+
|
| 740 |
+
.error-message h2 {
|
| 741 |
+
margin: 0;
|
| 742 |
+
font-size: 18px;
|
| 743 |
+
color: #721c24;
|
| 744 |
+
font-weight: 500;
|
| 745 |
+
}
|
| 746 |
+
|
| 747 |
+
.error-section {
|
| 748 |
+
background-color: #ffffff;
|
| 749 |
+
border-radius: 8px;
|
| 750 |
+
padding: 25px;
|
| 751 |
+
margin-bottom: 30px;
|
| 752 |
+
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.05);
|
| 753 |
+
}
|
| 754 |
+
|
| 755 |
+
.error-section h2 {
|
| 756 |
+
color: #e74c3c;
|
| 757 |
+
margin-top: 0;
|
| 758 |
+
margin-bottom: 20px;
|
| 759 |
+
font-size: 22px;
|
| 760 |
+
border-bottom: 2px solid #f0f0f0;
|
| 761 |
+
padding-bottom: 10px;
|
| 762 |
+
}
|
| 763 |
+
|
| 764 |
+
.error-details {
|
| 765 |
+
background-color: #ffebee;
|
| 766 |
+
border-left: 4px solid #e74c3c;
|
| 767 |
+
padding: 15px;
|
| 768 |
+
border-radius: 4px;
|
| 769 |
+
margin: 20px 0;
|
| 770 |
+
font-size: 16px;
|
| 771 |
+
line-height: 1.7;
|
| 772 |
+
color: #555;
|
| 773 |
+
}
|
| 774 |
+
|
| 775 |
+
/* Modern Footer Styles */
|
| 776 |
+
.footer-modern {
|
| 777 |
+
background-color: #1e293b;
|
| 778 |
+
color: #e2e8f0;
|
| 779 |
+
padding: 25px 30px 15px;
|
| 780 |
+
margin-top: 20px;
|
| 781 |
+
border-radius: 8px;
|
| 782 |
+
box-shadow: 0 -4px 6px -1px rgba(0, 0, 0, 0.1);
|
| 783 |
+
font-family: 'Inter', -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
|
| 784 |
+
width: 100%;
|
| 785 |
+
box-sizing: border-box;
|
| 786 |
+
margin-bottom: 25px;
|
| 787 |
+
}
|
| 788 |
+
|
| 789 |
+
.footer-modern-container {
|
| 790 |
+
display: flex;
|
| 791 |
+
justify-content: space-between;
|
| 792 |
+
flex-wrap: wrap;
|
| 793 |
+
gap: 30px;
|
| 794 |
+
margin: 0 auto;
|
| 795 |
+
text-align: left;
|
| 796 |
+
}
|
| 797 |
+
|
| 798 |
+
.footer-modern-col {
|
| 799 |
+
flex: 1;
|
| 800 |
+
min-width: 200px;
|
| 801 |
+
}
|
| 802 |
+
|
| 803 |
+
.brand-col {
|
| 804 |
+
flex: 0 0 160px;
|
| 805 |
+
/* Takes up less space to bring Support closer */
|
| 806 |
+
}
|
| 807 |
+
|
| 808 |
+
.help-col,
|
| 809 |
+
.share-col {
|
| 810 |
+
padding-top: 8px;
|
| 811 |
+
/* Pushes content down slightly to align with GenAI logo */
|
| 812 |
+
display: flex;
|
| 813 |
+
flex-direction: column;
|
| 814 |
+
}
|
| 815 |
+
|
| 816 |
+
.footer-modern-col h4 {
|
| 817 |
+
color: #f8fafc;
|
| 818 |
+
font-family: 'Poppins', sans-serif;
|
| 819 |
+
font-size: 20px;
|
| 820 |
+
font-weight: 600;
|
| 821 |
+
margin-top: 0;
|
| 822 |
+
margin-bottom: 12px;
|
| 823 |
+
letter-spacing: 0.5px;
|
| 824 |
+
display: flex;
|
| 825 |
+
align-items: center;
|
| 826 |
+
gap: 8px;
|
| 827 |
+
}
|
| 828 |
+
|
| 829 |
+
.footer-modern-col p {
|
| 830 |
+
font-size: 14px;
|
| 831 |
+
line-height: 1.7;
|
| 832 |
+
color: #cbd5e1;
|
| 833 |
+
margin-bottom: 12px;
|
| 834 |
+
}
|
| 835 |
+
|
| 836 |
+
.footer-modern-col a {
|
| 837 |
+
color: #cbd5e1;
|
| 838 |
+
text-decoration: none;
|
| 839 |
+
transition: all 0.2s ease;
|
| 840 |
+
}
|
| 841 |
+
|
| 842 |
+
.footer-modern-col a:hover {
|
| 843 |
+
color: #38bdf8;
|
| 844 |
+
}
|
| 845 |
+
|
| 846 |
+
.footer-modern-col ul {
|
| 847 |
+
list-style: none;
|
| 848 |
+
padding: 0;
|
| 849 |
+
margin: 0;
|
| 850 |
+
display: flex;
|
| 851 |
+
flex-direction: column;
|
| 852 |
+
}
|
| 853 |
+
|
| 854 |
+
.footer-modern-col ul li {
|
| 855 |
+
margin-bottom: 12px;
|
| 856 |
+
}
|
| 857 |
+
|
| 858 |
+
.footer-modern-col ul a {
|
| 859 |
+
font-size: 14px;
|
| 860 |
+
font-weight: 500;
|
| 861 |
+
}
|
| 862 |
+
|
| 863 |
+
.footer-modern-col img {
|
| 864 |
+
filter: brightness(0) invert(1);
|
| 865 |
+
opacity: 0.9;
|
| 866 |
+
margin-bottom: 15px;
|
| 867 |
+
transition: opacity 0.2s;
|
| 868 |
+
}
|
| 869 |
+
|
| 870 |
+
.footer-modern-col img:hover {
|
| 871 |
+
opacity: 1;
|
| 872 |
+
}
|
| 873 |
+
|
| 874 |
+
.footer-social-icons {
|
| 875 |
+
display: flex;
|
| 876 |
+
gap: 15px;
|
| 877 |
+
margin-top: 5px;
|
| 878 |
+
}
|
| 879 |
+
|
| 880 |
+
.footer-social-icons a {
|
| 881 |
+
display: flex;
|
| 882 |
+
align-items: center;
|
| 883 |
+
justify-content: center;
|
| 884 |
+
width: 36px;
|
| 885 |
+
height: 36px;
|
| 886 |
+
border-radius: 50%;
|
| 887 |
+
background: linear-gradient(135deg, #334155, #475569);
|
| 888 |
+
color: #ffffff;
|
| 889 |
+
transition: all 0.3s ease;
|
| 890 |
+
box-shadow: 0 2px 4px rgba(51, 65, 85, 0.3);
|
| 891 |
+
}
|
| 892 |
+
|
| 893 |
+
.footer-social-icons a:hover {
|
| 894 |
+
background: linear-gradient(135deg, #475569, #64748b);
|
| 895 |
+
color: #ffffff;
|
| 896 |
+
transform: translateY(-2px);
|
| 897 |
+
box-shadow: 0 4px 8px rgba(51, 65, 85, 0.4);
|
| 898 |
+
}
|
| 899 |
+
|
| 900 |
+
.footer-btn-share {
|
| 901 |
+
display: inline-flex;
|
| 902 |
+
align-items: center;
|
| 903 |
+
justify-content: center;
|
| 904 |
+
padding: 0 16px;
|
| 905 |
+
height: 36px;
|
| 906 |
+
background: linear-gradient(135deg, #334155, #475569);
|
| 907 |
+
color: #ffffff !important;
|
| 908 |
+
font-weight: 500;
|
| 909 |
+
border-radius: 18px;
|
| 910 |
+
font-size: 14px;
|
| 911 |
+
margin-top: auto;
|
| 912 |
+
align-self: flex-end;
|
| 913 |
+
margin-right: 15px;
|
| 914 |
+
transition: all 0.3s ease;
|
| 915 |
+
text-decoration: none !important;
|
| 916 |
+
gap: 8px;
|
| 917 |
+
font-family: inherit;
|
| 918 |
+
cursor: pointer;
|
| 919 |
+
border: none;
|
| 920 |
+
box-shadow: 0 2px 4px rgba(51, 65, 85, 0.3);
|
| 921 |
+
}
|
| 922 |
+
|
| 923 |
+
.footer-btn-share:hover {
|
| 924 |
+
background: linear-gradient(135deg, #475569, #64748b);
|
| 925 |
+
color: #ffffff !important;
|
| 926 |
+
transform: translateY(-2px);
|
| 927 |
+
box-shadow: 0 4px 8px rgba(51, 65, 85, 0.4);
|
| 928 |
+
}
|
| 929 |
+
|
| 930 |
+
.footer-modern-bottom {
|
| 931 |
+
text-align: center;
|
| 932 |
+
padding-top: 10px;
|
| 933 |
+
margin-top: 15px;
|
| 934 |
+
border-top: 1px solid #334155;
|
| 935 |
+
color: #94a3b8;
|
| 936 |
+
font-size: 13px;
|
| 937 |
+
margin-left: auto;
|
| 938 |
+
margin-right: auto;
|
| 939 |
+
}
|
| 940 |
+
|
| 941 |
+
.footer-modern-bottom p {
|
| 942 |
+
margin: 5px 0 0 0;
|
| 943 |
+
}
|
| 944 |
+
|
| 945 |
+
/* Mobile Responsiveness */
|
| 946 |
+
@media (max-width: 768px) {
|
| 947 |
+
.container {
|
| 948 |
+
padding: 0 15px;
|
| 949 |
+
}
|
| 950 |
+
|
| 951 |
+
.header {
|
| 952 |
+
flex-direction: column;
|
| 953 |
+
text-align: center;
|
| 954 |
+
padding: 15px;
|
| 955 |
+
}
|
| 956 |
+
|
| 957 |
+
.header-left {
|
| 958 |
+
margin-bottom: 15px;
|
| 959 |
+
}
|
| 960 |
+
|
| 961 |
+
.header img {
|
| 962 |
+
margin-bottom: 10px;
|
| 963 |
+
margin-right: 0;
|
| 964 |
+
}
|
| 965 |
+
|
| 966 |
+
/* Index specific mobile */
|
| 967 |
+
form {
|
| 968 |
+
flex-direction: column !important;
|
| 969 |
+
align-items: stretch !important;
|
| 970 |
+
}
|
| 971 |
+
|
| 972 |
+
input[type="text"] {
|
| 973 |
+
width: 100% !important;
|
| 974 |
+
max-width: none !important;
|
| 975 |
+
margin-right: 0 !important;
|
| 976 |
+
margin-bottom: 15px;
|
| 977 |
+
}
|
| 978 |
+
|
| 979 |
+
button {
|
| 980 |
+
width: 100%;
|
| 981 |
+
}
|
| 982 |
+
|
| 983 |
+
/* Error specific mobile */
|
| 984 |
+
.button,
|
| 985 |
+
.generate-another-btn {
|
| 986 |
+
width: 100%;
|
| 987 |
+
text-align: center;
|
| 988 |
+
margin-bottom: 10px;
|
| 989 |
+
}
|
| 990 |
+
}
|
| 991 |
+
|
| 992 |
+
/* Missing Styles Restored */
|
| 993 |
+
.scoring-rubric h4 {
|
| 994 |
+
margin-top: 0;
|
| 995 |
+
color: #1565c0;
|
| 996 |
+
margin-bottom: 15px;
|
| 997 |
+
}
|
| 998 |
+
|
| 999 |
+
.scoring-rubric table {
|
| 1000 |
+
width: 100%;
|
| 1001 |
+
margin-top: 15px;
|
| 1002 |
+
}
|
| 1003 |
+
|
| 1004 |
+
.scoring-rubric th,
|
| 1005 |
+
.scoring-rubric td {
|
| 1006 |
+
padding: 10px;
|
| 1007 |
+
text-align: left;
|
| 1008 |
+
}
|
| 1009 |
+
|
| 1010 |
+
.note-box {
|
| 1011 |
+
background-color: #fffbea;
|
| 1012 |
+
border-left: 4px solid #ffc107;
|
| 1013 |
+
padding: 20px;
|
| 1014 |
+
margin: 20px 0;
|
| 1015 |
+
border-radius: 8px;
|
| 1016 |
+
}
|
| 1017 |
+
|
| 1018 |
+
.download-section {
|
| 1019 |
+
background-color: #e8f5e9;
|
| 1020 |
+
border-left: 4px solid #83af84;
|
| 1021 |
+
padding: 20px;
|
| 1022 |
+
margin-bottom: 20px;
|
| 1023 |
+
border-radius: 8px;
|
| 1024 |
+
display: flex;
|
| 1025 |
+
justify-content: space-between;
|
| 1026 |
+
align-items: center;
|
| 1027 |
+
flex-wrap: wrap;
|
| 1028 |
+
gap: 15px;
|
| 1029 |
+
}
|
| 1030 |
+
|
| 1031 |
+
.download-section h3 {
|
| 1032 |
+
margin: 0;
|
| 1033 |
+
}
|
| 1034 |
+
|
| 1035 |
+
.download-buttons {
|
| 1036 |
+
display: flex;
|
| 1037 |
+
gap: 15px;
|
| 1038 |
+
}
|
| 1039 |
+
|
| 1040 |
+
.download-buttons button {
|
| 1041 |
+
display: inline-flex;
|
| 1042 |
+
align-items: center;
|
| 1043 |
+
justify-content: center;
|
| 1044 |
+
padding: 0 14px;
|
| 1045 |
+
height: 32px;
|
| 1046 |
+
background-color: #64748b;
|
| 1047 |
+
color: #ffffff !important;
|
| 1048 |
+
font-weight: 600;
|
| 1049 |
+
border-radius: 16px;
|
| 1050 |
+
font-size: 13px;
|
| 1051 |
+
transition: all 0.3s ease;
|
| 1052 |
+
text-decoration: none !important;
|
| 1053 |
+
gap: 8px;
|
| 1054 |
+
font-family: inherit;
|
| 1055 |
+
cursor: pointer;
|
| 1056 |
+
border: none;
|
| 1057 |
+
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
|
| 1058 |
+
}
|
| 1059 |
+
|
| 1060 |
+
.download-buttons button:hover {
|
| 1061 |
+
background-color: #94a3b8;
|
| 1062 |
+
color: #ffffff !important;
|
| 1063 |
+
transform: translateY(-2px);
|
| 1064 |
+
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15);
|
| 1065 |
+
}
|
| 1066 |
+
|
| 1067 |
+
.completeness-profile {
|
| 1068 |
+
background-color: #f6f5f5;
|
| 1069 |
+
border-radius: 8px;
|
| 1070 |
+
padding: 20px;
|
| 1071 |
+
margin: 20px 0;
|
| 1072 |
+
border-left: 4px solid #7f7c7c;
|
| 1073 |
+
}
|
| 1074 |
+
|
| 1075 |
+
.profile-badge {
|
| 1076 |
+
display: inline-flex;
|
| 1077 |
+
align-items: center;
|
| 1078 |
+
justify-content: center;
|
| 1079 |
+
height: 32px;
|
| 1080 |
+
padding: 0 14px;
|
| 1081 |
+
border-radius: 16px;
|
| 1082 |
+
color: white;
|
| 1083 |
+
font-weight: 600;
|
| 1084 |
+
font-size: 14px;
|
| 1085 |
+
margin-right: 10px;
|
| 1086 |
+
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
|
| 1087 |
+
box-sizing: border-box;
|
| 1088 |
+
}
|
| 1089 |
+
|
| 1090 |
+
.profile-basic {
|
| 1091 |
+
background: linear-gradient(135deg, rgb(255, 140, 0), rgb(255, 180, 50));
|
| 1092 |
+
box-shadow: 0 2px 4px rgba(255, 140, 0, 0.3);
|
| 1093 |
+
}
|
| 1094 |
+
|
| 1095 |
+
.profile-standard {
|
| 1096 |
+
background-color: #2196f3;
|
| 1097 |
+
}
|
| 1098 |
+
|
| 1099 |
+
.profile-advanced {
|
| 1100 |
+
background-color: #4caf50;
|
| 1101 |
+
}
|
| 1102 |
+
|
| 1103 |
+
.profile-incomplete {
|
| 1104 |
+
background-color: #f44336;
|
| 1105 |
+
color: white;
|
| 1106 |
+
}
|
| 1107 |
+
|
| 1108 |
+
.field-tier {
|
| 1109 |
+
display: inline-block;
|
| 1110 |
+
width: 12px;
|
| 1111 |
+
height: 12px;
|
| 1112 |
+
border-radius: 50%;
|
| 1113 |
+
margin-right: 5px;
|
| 1114 |
+
}
|
| 1115 |
+
|
| 1116 |
+
.tier-critical {
|
| 1117 |
+
background-color: #d32f2f;
|
| 1118 |
+
}
|
| 1119 |
+
|
| 1120 |
+
.tier-important {
|
| 1121 |
+
background-color: #ff9800;
|
| 1122 |
+
}
|
| 1123 |
+
|
| 1124 |
+
.tier-supplementary {
|
| 1125 |
+
background-color: #2196f3;
|
| 1126 |
+
}
|
| 1127 |
+
|
| 1128 |
+
.tier-legend {
|
| 1129 |
+
display: flex;
|
| 1130 |
+
margin: 15px 0;
|
| 1131 |
+
font-size: 0.9em;
|
| 1132 |
+
}
|
| 1133 |
+
|
| 1134 |
+
.tier-legend-item {
|
| 1135 |
+
display: flex;
|
| 1136 |
+
align-items: center;
|
| 1137 |
+
margin-right: 20px;
|
| 1138 |
+
}
|
| 1139 |
+
|
| 1140 |
+
.validation-penalty-info {
|
| 1141 |
+
background-color: #fff3e0;
|
| 1142 |
+
border-left: 4px solid #ff9800;
|
| 1143 |
+
padding: 20px;
|
| 1144 |
+
margin: 20px 0;
|
| 1145 |
+
border-radius: 8px;
|
| 1146 |
+
font-size: 0.95em;
|
| 1147 |
+
}
|
| 1148 |
+
|
| 1149 |
+
.validation-penalty-info h4 {
|
| 1150 |
+
margin-top: 0;
|
| 1151 |
+
color: #e65100;
|
| 1152 |
+
margin-bottom: 15px;
|
| 1153 |
+
}
|
| 1154 |
+
|
| 1155 |
+
.validation-warning-box {
|
| 1156 |
+
background-color: #fff3e0;
|
| 1157 |
+
border: 1px solid #ff9800;
|
| 1158 |
+
border-left: 4px solid #ff9800;
|
| 1159 |
+
border-radius: 8px;
|
| 1160 |
+
padding: 20px;
|
| 1161 |
+
margin: 20px 0;
|
| 1162 |
+
box-shadow: 0 2px 10px rgba(255, 152, 0, 0.1);
|
| 1163 |
+
}
|
| 1164 |
+
|
| 1165 |
+
.validation-warning-box h4 {
|
| 1166 |
+
margin-top: 0;
|
| 1167 |
+
color: #e65100;
|
| 1168 |
+
margin-bottom: 15px;
|
| 1169 |
+
display: flex;
|
| 1170 |
+
align-items: center;
|
| 1171 |
+
}
|
| 1172 |
+
|
| 1173 |
+
.validation-warning-box .warning-icon {
|
| 1174 |
+
margin-right: 10px;
|
| 1175 |
+
font-size: 1.2em;
|
| 1176 |
+
}
|
| 1177 |
+
|
| 1178 |
+
.validation-warning-box .issue-summary {
|
| 1179 |
+
margin-bottom: 15px;
|
| 1180 |
+
line-height: 1.6;
|
| 1181 |
+
}
|
| 1182 |
+
|
| 1183 |
+
.validation-warning-box .issue-details {
|
| 1184 |
+
margin-bottom: 15px;
|
| 1185 |
+
}
|
| 1186 |
+
|
| 1187 |
+
.validation-warning-box .issue-list {
|
| 1188 |
+
margin: 10px 0;
|
| 1189 |
+
padding-left: 20px;
|
| 1190 |
+
}
|
| 1191 |
+
|
| 1192 |
+
.validation-warning-box .issue-list li {
|
| 1193 |
+
margin-bottom: 8px;
|
| 1194 |
+
line-height: 1.5;
|
| 1195 |
+
}
|
| 1196 |
+
|
| 1197 |
+
.validation-warning-box .call-to-action {
|
| 1198 |
+
margin-top: 15px;
|
| 1199 |
+
padding-top: 15px;
|
| 1200 |
+
border-top: 1px solid #ffcc80;
|
| 1201 |
+
}
|
| 1202 |
+
|
| 1203 |
+
.validation-warning-box .call-to-action p {
|
| 1204 |
+
margin-bottom: 10px;
|
| 1205 |
+
}
|
| 1206 |
+
|
| 1207 |
+
.issue-tracker-link {
|
| 1208 |
+
display: inline-block;
|
| 1209 |
+
padding: 8px 16px;
|
| 1210 |
+
background-color: #3498db;
|
| 1211 |
+
color: white;
|
| 1212 |
+
text-decoration: none;
|
| 1213 |
+
border-radius: 4px;
|
| 1214 |
+
font-weight: 500;
|
| 1215 |
+
transition: background-color 0.3s;
|
| 1216 |
+
}
|
| 1217 |
+
|
| 1218 |
+
.issue-tracker-link:hover {
|
| 1219 |
+
background-color: #2980b9;
|
| 1220 |
+
text-decoration: none;
|
| 1221 |
+
}
|
| 1222 |
+
|
| 1223 |
+
.category-table {
|
| 1224 |
+
margin-bottom: 30px;
|
| 1225 |
+
}
|
| 1226 |
+
|
| 1227 |
+
.category-table h4 {
|
| 1228 |
+
color: #2c3e50;
|
| 1229 |
+
margin-bottom: 10px;
|
| 1230 |
+
font-size: 18px;
|
| 1231 |
+
}
|
| 1232 |
+
|
| 1233 |
+
.category-table table th:first-child,
|
| 1234 |
+
.category-table table td:first-child {
|
| 1235 |
+
text-align: center;
|
| 1236 |
+
vertical-align: middle;
|
| 1237 |
+
width: 1%;
|
| 1238 |
+
white-space: nowrap;
|
| 1239 |
+
}
|
| 1240 |
+
|
| 1241 |
+
.category-table table th:nth-child(3),
|
| 1242 |
+
.category-table table td:nth-child(3) {
|
| 1243 |
+
word-break: break-all;
|
| 1244 |
+
overflow-wrap: break-word;
|
| 1245 |
+
}
|
| 1246 |
+
|
| 1247 |
+
.category-table table th:nth-child(4),
|
| 1248 |
+
.category-table table td:nth-child(4) {
|
| 1249 |
+
width: 1%;
|
| 1250 |
+
white-space: nowrap;
|
| 1251 |
+
}
|
| 1252 |
+
|
| 1253 |
+
.category-table table th:nth-child(5),
|
| 1254 |
+
.category-table table td:nth-child(5) {
|
| 1255 |
+
width: 1%;
|
| 1256 |
+
white-space: nowrap;
|
| 1257 |
+
text-align: center;
|
| 1258 |
+
vertical-align: middle;
|
| 1259 |
+
}
|
| 1260 |
+
|
| 1261 |
+
.category-result {
|
| 1262 |
+
background-color: #f8f9fa;
|
| 1263 |
+
padding: 10px;
|
| 1264 |
+
border-radius: 4px;
|
| 1265 |
+
margin-top: 10px;
|
| 1266 |
+
font-weight: bold;
|
| 1267 |
+
}
|
| 1268 |
+
|
| 1269 |
+
.field-type-legend {
|
| 1270 |
+
background-color: #e3f2fd;
|
| 1271 |
+
border-left: 4px solid #2196f3;
|
| 1272 |
+
padding: 15px;
|
| 1273 |
+
margin: 20px 0;
|
| 1274 |
+
border-radius: 8px;
|
| 1275 |
+
font-size: 0.9em;
|
| 1276 |
+
}
|
| 1277 |
+
|
| 1278 |
+
.field-type-legend h4 {
|
| 1279 |
+
margin-top: 0;
|
| 1280 |
+
color: #1565c0;
|
| 1281 |
+
margin-bottom: 10px;
|
| 1282 |
+
}
|
| 1283 |
+
|
| 1284 |
+
.legend-item {
|
| 1285 |
+
display: inline-block;
|
| 1286 |
+
margin-right: 20px;
|
| 1287 |
+
margin-bottom: 5px;
|
| 1288 |
+
}
|
src/static/images/cdx.webp
ADDED
|
src/static/images/genai_security_project_logo.webp
ADDED
|
src/static/js/script.js
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// OWASP AIBOM Generator - Common Scripts


// Add Enter key support for form submission (Index Page).
// Uses 'keydown' instead of the deprecated 'keypress' event; behavior for
// the Enter key is identical.
document.addEventListener('DOMContentLoaded', function () {
    var modelInput = document.querySelector('input[name="model_id"]');
    if (!modelInput) {
        return;
    }
    modelInput.addEventListener('keydown', function (e) {
        if (e.key !== 'Enter') {
            return;
        }
        // Route Enter through the Generate button so its inline onclick
        // handler (disable + "Generating..." label) still runs.
        e.preventDefault();
        var btn = document.getElementById('generate-button');
        if (btn) btn.click();
    });
});
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
/* === Result Page Functions === */
|
| 20 |
+
|
| 21 |
+
/**
 * Shows the tab pane with the given id and highlights the matching tab
 * button, deactivating every other .tab-content pane and .aibom-tab button.
 * @param {string} tabId - Element id of the tab pane to activate.
 */
function switchTab(tabId) {
    // Deactivate all panes and all tab buttons first.
    Array.prototype.forEach.call(document.getElementsByClassName('tab-content'), function (pane) {
        pane.classList.remove('active');
    });
    Array.prototype.forEach.call(document.getElementsByClassName('aibom-tab'), function (tab) {
        tab.classList.remove('active');
    });

    // Activate the requested pane...
    var pane = document.getElementById(tabId);
    if (pane) pane.classList.add('active');

    // ...and the tab button whose inline onclick targets it.
    var button = document.querySelector('.aibom-tab[onclick="switchTab(\'' + tabId + '\')"]');
    if (button) button.classList.add('active');
}
|
| 41 |
+
|
| 42 |
+
/**
 * Toggles a collapsible header and its sibling content panel.
 * The panel's max-height animates between 0 and its full scroll height.
 * @param {Element} element - The clicked collapsible header element.
 */
function toggleCollapsible(element) {
    element.classList.toggle('active');
    var panel = element.nextElementSibling;
    if (!panel) {
        return;
    }
    // classList.toggle returns the NEW state: true when 'active' was added.
    var nowOpen = panel.classList.toggle('active');
    panel.style.maxHeight = nowOpen ? panel.scrollHeight + 'px' : '0';
}
|
| 55 |
+
|
| 56 |
+
/**
 * Downloads a JSON object (or pre-serialized JSON string) as a file.
 * @param {Object|string} content - The JSON object or string to download.
 * @param {string} filename - The name of the file to save as (defaults to "aibom.json").
 */
function downloadJSON(content, filename) {
    var serialized = typeof content === 'string'
        ? content
        : JSON.stringify(content, null, 2);
    var dataStr = "data:text/json;charset=utf-8," + encodeURIComponent(serialized);

    // A temporary anchor triggers the browser's download flow; it must be
    // attached to the DOM for Firefox to honour the programmatic click.
    var anchor = document.createElement('a');
    anchor.setAttribute("href", dataStr);
    anchor.setAttribute("download", filename || "aibom.json");
    document.body.appendChild(anchor);
    anchor.click();
    anchor.remove();
}
|
| 72 |
+
|
| 73 |
+
// Collapsible sections (Result Page):
// The markup attaches onclick="toggleCollapsible(this)" inline and every
// section starts collapsed, so no per-element JavaScript initialization is
// required. (An earlier version registered a DOMContentLoaded listener that
// looped over .collapsible elements without doing anything; that dead code
// has been removed — behavior is unchanged.)
|
| 86 |
+
|
| 87 |
+
// Validate Hugging Face URL or Model ID (Index Page)

// Hoisted so the regex literal is not re-created on every keystroke.
// Matches a bare "org/repo" identifier: alphanumerics, dots, dashes, underscores.
var MODEL_ID_PATTERN = /^[a-zA-Z0-9_\-\.]+\/[a-zA-Z0-9_\-\.]+$/;

/**
 * Returns true when `value` is either a full Hugging Face model URL
 * (starting with https://huggingface.co/) or a bare org/repo identifier.
 * @param {string} value - Trimmed user input.
 * @returns {boolean}
 */
function isValidModelReference(value) {
    return value.startsWith('https://huggingface.co/') || MODEL_ID_PATTERN.test(value);
}

document.addEventListener('DOMContentLoaded', function () {
    var modelInput = document.getElementById('model-input');
    var generateButton = document.getElementById('generate-button');
    if (!modelInput || !generateButton) {
        return;
    }

    // Enable the Generate button only while the input looks like a valid
    // model reference; style it to signal the disabled state.
    function refreshButtonState() {
        var ok = isValidModelReference(modelInput.value.trim());
        generateButton.disabled = !ok;
        generateButton.style.cursor = ok ? 'pointer' : 'not-allowed';
        generateButton.style.opacity = ok ? '1' : '0.6';
    }

    modelInput.addEventListener('input', refreshButtonState);
    // Initial check (covers browser autofill / back-navigation restore).
    refreshButtonState();
});
|
src/templates/error.html
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>AIBOM Generator - Error</title>

    <!-- Use the static_root convention (app may be mounted under a URL
         prefix) and the ?v=2.0 cache-buster, matching result.html. -->
    <link rel="stylesheet" href="{{ static_root|default('/static') }}/css/style.css?v=2.0">
</head>

<body>
    <div class="container">

        <!-- Header -->
        {% include 'includes/header.html' %}

        <!-- Error message -->
        <div class="error-message">
            <h2>❌ Error Generating AIBOM</h2>
        </div>

        <!-- Try Again Button -->
        <div style="text-align: left; margin-bottom: 20px;">
            <a href="/" class="button">🔄 Try Again</a>
        </div>

        <!-- Error Details -->
        <div class="error-section">
            <h2>What Happened?</h2>
            <div class="error-details">
                <p>{{ error }}</p>
            </div>
        </div>

        <!-- Common Solutions -->
        <div class="content-section">
            <h2>💡 Common Solutions</h2>
            <p><strong>Model not found:</strong> Check that the model ID follows <code>owner/model-name</code> format
                and exists on Hugging Face.</p>
            <p><strong>Access issues:</strong> Some models require an access token or may be private.</p>
            <p><strong>Temporary issues:</strong> Try again if there were connectivity or Hugging Face API hiccups.</p>
        </div>

        <!-- Modern Footer -->
        {% include 'includes/footer.html' %}

    </div>
</body>

</html>
|
src/templates/includes/footer.html
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!-- Shared site footer, included by every page template.
     Three columns (brand + social, support, share) plus a bottom copyright
     bar. Asset paths use static_root (falls back to /static) so the app can
     be mounted under a URL prefix. -->
<footer class="footer-modern">
    <div class="footer-modern-container">
        <!-- Brand Column -->
        <div class="footer-modern-col brand-col">
            <a href="https://genai.owasp.org/" target="_blank">
                <img src="{{ static_root|default('/static') }}/images/genai_security_project_logo.webp"
                    alt="OWASP GenAI Security Project" height="45">
            </a>
            <div class="footer-social-icons">
                <a href="https://github.com/GenAI-Security-Project/aibom-generator" target="_blank"
                    rel="noopener noreferrer" aria-label="GitHub">
                    <svg viewBox="0 0 24 24" width="22" height="22" stroke="currentColor" stroke-width="2" fill="none"
                        stroke-linecap="round" stroke-linejoin="round">
                        <path
                            d="M9 19c-5 1.5-5-2.5-7-3m14 6v-3.87a3.37 3.37 0 0 0-.94-2.61c3.14-.35 6.44-1.54 6.44-7A5.44 5.44 0 0 0 20 4.77 5.07 5.07 0 0 0 19.91 1S18.73.65 16 2.48a13.38 13.38 0 0 0-7 0C6.27.65 5.09 1 5.09 1A5.07 5.07 0 0 0 5 4.77a5.44 5.44 0 0 0-1.5 3.78c0 5.42 3.3 6.61 6.44 7A3.37 3.37 0 0 0 9 18.13V22">
                        </path>
                    </svg>
                </a>
                <a href="https://www.linkedin.com/company/owasp-aibom/" target="_blank" rel="noopener noreferrer"
                    aria-label="LinkedIn">
                    <svg viewBox="0 0 24 24" width="22" height="22" stroke="currentColor" stroke-width="2" fill="none"
                        stroke-linecap="round" stroke-linejoin="round">
                        <path d="M16 8a6 6 0 0 1 6 6v7h-4v-7a2 2 0 0 0-2-2 2 2 0 0 0-2 2v7h-4v-7a6 6 0 0 1 6-6z"></path>
                        <rect x="2" y="9" width="4" height="12"></rect>
                        <circle cx="4" cy="4" r="2"></circle>
                    </svg>
                </a>
            </div>
        </div>

        <!-- Help Column -->
        <div class="footer-modern-col help-col">
            <h4>
                <svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"
                    stroke-linecap="round" stroke-linejoin="round">
                    <path d="M12 22s8-4 8-10V5l-8-3-8 3v7c0 6 8 10 8 10z"></path>
                </svg>
                Support
            </h4>
            <p>If you encountered any problems, found a bug, or have suggestions for improvement, we'd love to hear from
                you!</p>
            <a href="https://github.com/GenAI-Security-Project/aibom-generator/issues" target="_blank"
                title="Report an Issue" aria-label="Report an Issue" class="footer-btn-share">
                <svg viewBox="0 0 24 24" width="18" height="18" stroke="currentColor" stroke-width="2" fill="none"
                    stroke-linecap="round" stroke-linejoin="round">
                    <path d="M10.29 3.86L1.82 18a2 2 0 0 0 1.71 3h16.94a2 2 0 0 0 1.71-3L13.71 3.86a2 2 0 0 0-3.42 0z">
                    </path>
                    <line x1="12" y1="9" x2="12" y2="13"></line>
                    <line x1="12" y1="17" x2="12.01" y2="17"></line>
                </svg>
                <span style="font-weight: 700;">Report Issue</span>
            </a>
        </div>

        <!-- Share Column -->
        <div class="footer-modern-col share-col">
            <h4>
                <svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"
                    stroke-linecap="round" stroke-linejoin="round">
                    <circle cx="18" cy="5" r="3"></circle>
                    <circle cx="6" cy="12" r="3"></circle>
                    <circle cx="18" cy="19" r="3"></circle>
                    <line x1="8.59" y1="13.51" x2="15.42" y2="17.49"></line>
                    <line x1="15.41" y1="6.51" x2="8.59" y2="10.49"></line>
                </svg>
                Spread the Word
            </h4>
            <p>If you find this tool useful, share it with your network!</p>
            <a href="https://www.linkedin.com/sharing/share-offsite/?url=https://www.linkedin.com/company/owasp-aibom/"
                target="_blank" rel="noopener noreferrer" title="Share" aria-label="Share" class="footer-btn-share">
                <svg viewBox="0 0 24 24" width="18" height="18" stroke="currentColor" stroke-width="2" fill="none"
                    stroke-linecap="round" stroke-linejoin="round">
                    <path d="M16 8a6 6 0 0 1 6 6v7h-4v-7a2 2 0 0 0-2-2 2 2 0 0 0-2 2v7h-4v-7a6 6 0 0 1 6-6z"></path>
                    <rect x="2" y="9" width="4" height="12"></rect>
                    <circle cx="4" cy="4" r="2"></circle>
                </svg>
                <span style="font-weight: 700;">Share</span>
            </a>
        </div>
    </div>

    <div class="footer-modern-bottom">
        <p>© 2026 OWASP GenAI Security Project - AIBOM Initiative</p>
    </div>
</footer>
|
src/templates/includes/header.html
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!-- Shared page header: OWASP logo, page title, and an optional
     "Generate Another AIBOM" link on the right. Set
     hide_generate_another = true before including this template to
     suppress that link (the index page does this). -->
<div class="header">
    <div class="header-left">
        <a href="https://genai.owasp.org/" target="_blank">
            <img src="{{ static_root|default('/static') }}/images/genai_security_project_logo.webp"
                alt="OWASP GenAI Security Project logo">
        </a>
    </div>
    <div class="header-content">
        <h1>AIBOM Generator</h1>
    </div>
    {% if not hide_generate_another %}
    <div class="header-right">
        <a href="/" class="generate-another-btn">
            <svg viewBox="0 0 24 24" width="16" height="16" stroke="currentColor" stroke-width="2" fill="none"
                stroke-linecap="round" stroke-linejoin="round">
                <path
                    d="m12 3-1.912 5.813a2 2 0 0 1-1.275 1.275L3 12l5.813 1.912a2 2 0 0 1 1.275 1.275L12 21l1.912-5.813a2 2 0 0 1 1.275-1.275L21 12l-5.813-1.912a2 2 0 0 1-1.275-1.275L12 3Z">
                </path>
                <path d="M5 3v4"></path>
                <path d="M19 17v4"></path>
                <path d="M3 5h4"></path>
                <path d="M17 19h4"></path>
            </svg>
            Generate Another AIBOM
        </a>
    </div>
    {% endif %}
</div>
|
src/templates/index.html
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>AIBOM Generator</title>
    <!-- Use the static_root convention (app may be mounted under a URL
         prefix), matching result.html and the shared includes. -->
    <link rel="stylesheet" href="{{ static_root|default('/static') }}/css/style.css?v=2.0">
</head>

<body>
    <div class="container">
        <!-- Header (the "Generate Another" link is hidden on this page) -->
        {% set hide_generate_another = true %}
        {% include 'includes/header.html' %}
    </div>

    <div class="container">
        <!-- Form Section (Moved to top) -->
        <div class="form-section">
            <h2>Generate AIBOM</h2>
            <p>
                Enter a model on Hugging Face, in a format
                <code>&lt;organization-or-username&gt;/&lt;model-name&gt;</code> (easy copy button), or model's URL, to
                generate AIBOM in CycloneDX format. You can browse available models in the <a
                    href="https://huggingface.co/models" target="_blank" rel="noopener noreferrer">Hugging Face models
                    repository</a>.
            </p>
            <!-- The button starts disabled; script.js enables it once the
                 input looks like a valid model ID or Hugging Face URL. -->
            <form id="sbom-form" action="/generate" method="post"
                style="display: flex; flex-direction: row; align-items: center; width: 100%;">
                <input type="text" name="model_id" id="model-input" placeholder="e.g., openai/whisper-tiny" required
                    style="flex: 1; max-width: 70%; margin-right: 10px;">
                <button type="submit" id="generate-button" disabled
                    onclick="this.disabled=true; this.innerText='Generating...'; document.getElementById('sbom-form').submit();">Generate
                    AIBOM</button>
            </form>

        </div>

        <!-- Tool Description Section -->
        <div class="content-section">
            <h2>About This Tool</h2>
            <p>This open-source tool generates AIBOM (AI Bill of Materials) for models hosted on Hugging Face. It
                automatically extracts and formats key information about AI models into a standardized, machine-readable
                SBOM (Software Bill of Materials) using the CycloneDX JSON format. Because metadata quality varies
                across models and much of the information is unstructured, the tool analyzes what is available,
                organizes it into a consistent structure, and provides an AIBOM completeness score that evaluates how
                well the model is documented. This helps users quickly understand documentation gaps and supports
                transparency, security, and compliance. The tool is also listed on <a
                    href="https://cyclonedx.org/tool-center/" target="_blank" rel="noopener noreferrer">CycloneDX Tool
                    Center</a>.</p>
        </div>

        <!-- Introduction Section -->
        <div class="content-section">
            <h2>Understanding AIBOMs</h2>
            <p>An AIBOM (Artificial Intelligence Bill of Materials, also known as AI/ML-BOM, AI SBOM, or SBOM for AI) is
                a detailed, structured inventory that lists the components and dependencies involved in building and
                operating an AI system—such as pre-trained models, datasets, libraries, and configuration parameters.
                Much like a traditional SBOM for software, an AIBOM brings transparency to what goes into an AI system,
                enabling organizations to assess security, compliance, and ethical risks. It is essential for managing
                AI supply chain risks, supporting regulatory requirements, ensuring model provenance, and enabling
                incident response and audits. As AI systems grow more complex and widely adopted, AIBOMs become critical
                for maintaining trust, accountability, and control over how AI technologies are developed, integrated,
                and deployed.</p>
        </div>

        <!-- Modern Footer -->
        {% include 'includes/footer.html' %}
    </div>

    <!-- JavaScript for loading indicator and Captcha -->
    <script src="{{ static_root|default('/static') }}/js/script.js"></script>
</body>

</html>
|
src/templates/result.html
ADDED
|
@@ -0,0 +1,845 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
|
| 4 |
+
<head>
|
| 5 |
+
<meta charset="UTF-8">
|
| 6 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 7 |
+
<title>AIBOM Generated</title>
|
| 8 |
+
<link rel="stylesheet" href="{{ static_root|default('/static') }}/css/style.css?v=2.0">
|
| 9 |
+
|
| 10 |
+
</head>
|
| 11 |
+
|
| 12 |
+
<body>
|
| 13 |
+
<div class="container">
|
| 14 |
+
<!-- Header -->
|
| 15 |
+
{% include 'includes/header.html' %}
|
| 16 |
+
|
| 17 |
+
<!-- Success Message
|
| 18 |
+
<div class="success-message">
|
| 19 |
+
<h2>✅ AIBOM is Generated Successfully for <span class="model-name">{{ model_id }}</span></h2>
|
| 20 |
+
</div> -->
|
| 21 |
+
|
| 22 |
+
<!-- Key Information -->
|
| 23 |
+
<div class="key-info">
|
| 24 |
+
<h3>📋 AIBOM Summary</h3>
|
| 25 |
+
<div class="aibom-property">
|
| 26 |
+
<span class="property-name">Model:</span>
|
| 27 |
+
<span class="property-value"><a href="https://huggingface.co/{{ model_id }}" target="_blank">{{ model_id
|
| 28 |
+
}}</a></span>
|
| 29 |
+
</div>
|
| 30 |
+
<div class="aibom-property">
|
| 31 |
+
<span class="property-name">Generated:</span>
|
| 32 |
+
<span class="property-value">{{ aibom.metadata.timestamp }}</span>
|
| 33 |
+
</div>
|
| 34 |
+
<div class="aibom-property">
|
| 35 |
+
<span class="property-name">SBOM Format:</span>
|
| 36 |
+
<span class="property-value">
|
| 37 |
+
<a href="https://cyclonedx.org/docs/1.6/json/#components_items_modelCard" target="_blank">{{
|
| 38 |
+
aibom.bomFormat }} {{ aibom.specVersion }}</a>,
|
| 39 |
+
<a href="https://cyclonedx.org/docs/1.7/json/#components_items_modelCard" target="_blank">{{
|
| 40 |
+
aibom.bomFormat }} 1.7</a>
|
| 41 |
+
</span>
|
| 42 |
+
</div>
|
| 43 |
+
<div class="aibom-property">
|
| 44 |
+
<span class="property-name">Serial Number:</span>
|
| 45 |
+
<span class="property-value">{{ aibom.serialNumber }}</span>
|
| 46 |
+
</div>
|
| 47 |
+
</div>
|
| 48 |
+
|
| 49 |
+
<!-- Calculate Shared Score State -->
|
| 50 |
+
{% set score_percent = (completeness_score.total_score if completeness_score.total_score != 'Undefined' else 0)
|
| 51 |
+
| float %}
|
| 52 |
+
{% if score_percent >= 90 %}
|
| 53 |
+
{% set score_class = 'progress-excellent' %}
|
| 54 |
+
{% set score_label = 'Excellent' %}
|
| 55 |
+
{% elif score_percent >= 70 %}
|
| 56 |
+
{% set score_class = 'progress-good' %}
|
| 57 |
+
{% set score_label = 'Good' %}
|
| 58 |
+
{% elif score_percent >= 50 %}
|
| 59 |
+
{% set score_class = 'progress-fair' %}
|
| 60 |
+
{% set score_label = 'Fair' %}
|
| 61 |
+
{% else %}
|
| 62 |
+
{% set score_class = 'progress-poor' %}
|
| 63 |
+
{% set score_label = 'Poor' %}
|
| 64 |
+
{% endif %}
|
| 65 |
+
|
| 66 |
+
<!-- Completeness Profile & Download Section -->
|
| 67 |
+
<div class="completeness-profile {{ score_class }}-border"
|
| 68 |
+
style="display: flex; flex-wrap: wrap; gap: 0; align-items: center;">
|
| 69 |
+
<div class="completeness-left"
|
| 70 |
+
style="flex: 1 1 50%; min-width: 300px; padding-right: 20px; box-sizing: border-box;">
|
| 71 |
+
{% if completeness_score.completeness_profile %}
|
| 72 |
+
<h3 style="margin-top:0; margin-bottom:15px;">📊 Completeness Assessment</h3>
|
| 73 |
+
<div style="display: flex; gap: 15px; align-items: center;">
|
| 74 |
+
<span class="profile-badge profile-{{ completeness_score.completeness_profile.name|lower }}">
|
| 75 |
+
{{ completeness_score.completeness_profile.name }}
|
| 76 |
+
</span>
|
| 77 |
+
<span>{{ completeness_score.completeness_profile.description }}</span>
|
| 78 |
+
</div>
|
| 79 |
+
{% endif %}
|
| 80 |
+
</div>
|
| 81 |
+
|
| 82 |
+
<div class="completeness-right" style="flex: 1 1 50%; min-width: 300px; box-sizing: border-box;">
|
| 83 |
+
<div class="download-section-inner">
|
| 84 |
+
<h3 style="margin-top:0; margin-bottom:15px;">💾 Download your AIBOM</h3>
|
| 85 |
+
<div class="download-buttons" style="display: flex; gap: 10px;">
|
| 86 |
+
<button onclick="downloadJSON(AIBOM_CDX_JSON_1_6, FILENAME_BASE + '_aibom_1_6.json')">
|
| 87 |
+
<img src="{{ static_root|default('/static') }}/images/cdx.webp" alt="CycloneDX Logo"
|
| 88 |
+
width="16" height="16" style="filter: brightness(0) invert(1);">
|
| 89 |
+
CycloneDX 1.6
|
| 90 |
+
</button>
|
| 91 |
+
<button onclick="downloadJSON(AIBOM_CDX_JSON_1_7, FILENAME_BASE + '_aibom_1_7.json')">
|
| 92 |
+
<img src="{{ static_root|default('/static') }}/images/cdx.webp" alt="CycloneDX Logo"
|
| 93 |
+
width="16" height="16" style="filter: brightness(0) invert(1);">
|
| 94 |
+
CycloneDX 1.7
|
| 95 |
+
</button>
|
| 96 |
+
</div>
|
| 97 |
+
</div>
|
| 98 |
+
</div>
|
| 99 |
+
</div>
|
| 100 |
+
|
| 101 |
+
<!-- Tabbed Content -->
|
| 102 |
+
<div class="aibom-viewer">
|
| 103 |
+
<div class="aibom-tabs">
|
| 104 |
+
<div class="aibom-tab active" onclick="switchTab('human-view')">Human-Friendly View</div>
|
| 105 |
+
<div class="aibom-tab" onclick="switchTab('field-checklist')">Field Checklist</div>
|
| 106 |
+
<div class="aibom-tab" onclick="switchTab('score-view')">Score Report</div>
|
| 107 |
+
<div class="aibom-tab" onclick="switchTab('json-view')">JSON View</div>
|
| 108 |
+
</div>
|
| 109 |
+
|
| 110 |
+
<!-- Human-Friendly View Tab -->
|
| 111 |
+
<div id="human-view" class="tab-content active">
|
| 112 |
+
<div class="aibom-section">
|
| 113 |
+
<h4>🤖 AI Model Information</h4>
|
| 114 |
+
<div class="aibom-property">
|
| 115 |
+
<span class="property-name">Name:</span>
|
| 116 |
+
<span class="property-value">
|
| 117 |
+
{{ aibom.components[0].name if aibom.components else 'Not specified' }}
|
| 118 |
+
</span>
|
| 119 |
+
</div>
|
| 120 |
+
<div class="aibom-property">
|
| 121 |
+
<span class="property-name">Type:</span>
|
| 122 |
+
<span class="property-value">
|
| 123 |
+
{{ aibom.components[0].type if aibom.components else 'Not specified' }}
|
| 124 |
+
</span>
|
| 125 |
+
</div>
|
| 126 |
+
<div class="aibom-property">
|
| 127 |
+
<span class="property-name">Version:</span>
|
| 128 |
+
<span class="property-value">{{ aibom.components[0].version if aibom.components else 'Not
|
| 129 |
+
specified' }}</span>
|
| 130 |
+
</div>
|
| 131 |
+
<div class="aibom-property">
|
| 132 |
+
<span class="property-name">Description:</span>
|
| 133 |
+
<span class="property-value">{{ aibom.components[0].description if aibom.components and
|
| 134 |
+
aibom.components[0].description else 'Not specified' }}</span>
|
| 135 |
+
</div>
|
| 136 |
+
<div class="aibom-property">
|
| 137 |
+
<span class="property-name">PURL:</span>
|
| 138 |
+
<span class="property-value">{{ aibom.components[0].purl if aibom.components and
|
| 139 |
+
aibom.components[0].purl else 'Not specified' }}</span>
|
| 140 |
+
</div>
|
| 141 |
+
{% if aibom.components and aibom.components[0].licenses %}
|
| 142 |
+
<div class="aibom-property">
|
| 143 |
+
<span class="property-name">Licenses:</span>
|
| 144 |
+
<span class="property-value">
|
| 145 |
+
{% for license in aibom.components[0].licenses %}
|
| 146 |
+
<span class="tag">
|
| 147 |
+
{% if license.license %}
|
| 148 |
+
{{ license.license.id if license.license.id else license.license.name }}
|
| 149 |
+
{% else %}
|
| 150 |
+
Unknown
|
| 151 |
+
{% endif %}
|
| 152 |
+
</span>
|
| 153 |
+
{% endfor %}
|
| 154 |
+
</span>
|
| 155 |
+
</div>
|
| 156 |
+
{% endif %}
|
| 157 |
+
</div>
|
| 158 |
+
|
| 159 |
+
{% if aibom.components and aibom.components[0].modelCard %}
|
| 160 |
+
<div class="aibom-section">
|
| 161 |
+
<h4>📊 Model Card</h4>
|
| 162 |
+
{% if aibom.components[0].modelCard.modelParameters %}
|
| 163 |
+
<div class="aibom-property">
|
| 164 |
+
<span class="property-name">Architecture:</span>
|
| 165 |
+
<span class="property-value">
|
| 166 |
+
{{ aibom.components[0].modelCard.modelParameters.modelArchitecture if
|
| 167 |
+
aibom.components[0].modelCard.modelParameters.modelArchitecture else 'Not specified' }}
|
| 168 |
+
</span>
|
| 169 |
+
</div>
|
| 170 |
+
<div class="aibom-property">
|
| 171 |
+
<span class="property-name">Task:</span>
|
| 172 |
+
<span class="property-value">{{ aibom.components[0].modelCard.modelParameters.task if
|
| 173 |
+
aibom.components[0].modelCard.modelParameters.task else 'Not specified' }}</span>
|
| 174 |
+
</div>
|
| 175 |
+
{% endif %}
|
| 176 |
+
{% if aibom.components[0].modelCard.properties %}
|
| 177 |
+
<div class="aibom-property">
|
| 178 |
+
<span class="property-name">Additional Properties:</span>
|
| 179 |
+
<span class="property-value">
|
| 180 |
+
{% for prop in aibom.components[0].modelCard.properties %}
|
| 181 |
+
<span class="tag">{{ prop.name }}: {{ prop.value }}</span>
|
| 182 |
+
{% endfor %}
|
| 183 |
+
</span>
|
| 184 |
+
</div>
|
| 185 |
+
{% endif %}
|
| 186 |
+
{% set hp_props = [] %}
|
| 187 |
+
{% set quant_props = [] %}
|
| 188 |
+
{% if aibom.components[0].properties %}
|
| 189 |
+
{% for prop in aibom.components[0].properties %}
|
| 190 |
+
{% if prop.name.startswith('hyperparameter:') %}{% if hp_props.append(prop) %}{% endif %}{% endif %}
|
| 191 |
+
{% if prop.name.startswith('quantization:') %}{% if quant_props.append(prop) %}{% endif %}{% endif
|
| 192 |
+
%}
|
| 193 |
+
{% endfor %}
|
| 194 |
+
{% endif %}
|
| 195 |
+
{% if hp_props %}
|
| 196 |
+
<div class="aibom-property">
|
| 197 |
+
<span class="property-name">Hyperparameters:</span>
|
| 198 |
+
<span class="property-value">
|
| 199 |
+
{% for prop in hp_props %}
|
| 200 |
+
<span class="tag">{{ prop.name.split(':')[1] | replace('_', ' ') | title }}: {{ prop.value
|
| 201 |
+
}}</span>
|
| 202 |
+
{% endfor %}
|
| 203 |
+
</span>
|
| 204 |
+
</div>
|
| 205 |
+
{% endif %}
|
| 206 |
+
{% if quant_props %}
|
| 207 |
+
<div class="aibom-property">
|
| 208 |
+
<span class="property-name">Quantization:</span>
|
| 209 |
+
<span class="property-value">
|
| 210 |
+
{% for prop in quant_props %}
|
| 211 |
+
<span class="tag">{{ prop.name.split(':')[1] | replace('_', ' ') | title }}: {{ prop.value
|
| 212 |
+
}}</span>
|
| 213 |
+
{% endfor %}
|
| 214 |
+
</span>
|
| 215 |
+
</div>
|
| 216 |
+
{% endif %}
|
| 217 |
+
</div>
|
| 218 |
+
{% endif %}
|
| 219 |
+
|
| 220 |
+
{% if aibom.externalReferences %}
|
| 221 |
+
<div class="aibom-section">
|
| 222 |
+
<h4>🔗 External References</h4>
|
| 223 |
+
{% for ref in aibom.externalReferences %}
|
| 224 |
+
<div class="aibom-property">
|
| 225 |
+
<span class="property-name">{{ ref.type|title }}:</span>
|
| 226 |
+
<span class="property-value"><a href="{{ ref.url }}" target="_blank">{{ ref.url }}</a></span>
|
| 227 |
+
</div>
|
| 228 |
+
{% endfor %}
|
| 229 |
+
</div>
|
| 230 |
+
{% endif %}
|
| 231 |
+
|
| 232 |
+
<div class="aibom-section">
|
| 233 |
+
<h4>🛠️ Generation Metadata</h4>
|
| 234 |
+
<div class="aibom-property">
|
| 235 |
+
<span class="property-name">Generated by:</span>
|
| 236 |
+
<span class="property-value">{{ aibom.metadata.tools.components[0].name if aibom.metadata.tools
|
| 237 |
+
and aibom.metadata.tools.components else 'Unknown' }}</span>
|
| 238 |
+
</div>
|
| 239 |
+
<div class="aibom-property">
|
| 240 |
+
<span class="property-name">Timestamp:</span>
|
| 241 |
+
<span class="property-value">{{ aibom.metadata.timestamp }}</span>
|
| 242 |
+
</div>
|
| 243 |
+
{% if aibom.components and aibom.components[0].purl %}
|
| 244 |
+
<div class="aibom-property">
|
| 245 |
+
<span class="property-name">Component PURL:</span>
|
| 246 |
+
<span class="property-value"><a href="https://huggingface.co/{{ model_id }}" target="_blank">{{
|
| 247 |
+
aibom.components[0].purl }}</a></span>
|
| 248 |
+
</div>
|
| 249 |
+
{% elif aibom.metadata.component %}
|
| 250 |
+
<div class="aibom-property">
|
| 251 |
+
<span class="property-name">Component PURL:</span>
|
| 252 |
+
<span class="property-value">{{ aibom.metadata.component['bom-ref'] }}</span>
|
| 253 |
+
</div>
|
| 254 |
+
{% endif %}
|
| 255 |
+
</div>
|
| 256 |
+
</div>
|
| 257 |
+
|
| 258 |
+
<!-- Field Checklist Tab -->
|
| 259 |
+
<div id="field-checklist" class="tab-content">
|
| 260 |
+
<div class="content-section">
|
| 261 |
+
<h3>Field Checklist & Mapping</h3>
|
| 262 |
+
|
| 263 |
+
<!-- Field Type Legend -->
|
| 264 |
+
<div class="field-type-legend">
|
| 265 |
+
<h4>Legend</h4>
|
| 266 |
+
<div class="legend-item">
|
| 267 |
+
<span class="field-tier tier-critical"></span>
|
| 268 |
+
<span>Critical</span>
|
| 269 |
+
</div>
|
| 270 |
+
<div class="legend-item">
|
| 271 |
+
<span class="field-tier tier-important"></span>
|
| 272 |
+
<span>Important</span>
|
| 273 |
+
</div>
|
| 274 |
+
<div class="legend-item">
|
| 275 |
+
<span class="field-tier tier-supplementary"></span>
|
| 276 |
+
<span>Supplementary</span>
|
| 277 |
+
</div>
|
| 278 |
+
<div class="legend-item">
|
| 279 |
+
<strong>CDX</strong> = CycloneDX Standard
|
| 280 |
+
</div>
|
| 281 |
+
<div class="legend-item">
|
| 282 |
+
<strong>AI</strong> = AI-Specific Extension
|
| 283 |
+
</div>
|
| 284 |
+
</div>
|
| 285 |
+
|
| 286 |
+
<p>This breakdown outlines field categories and statuses in the AIBOM generated for model <strong><a
|
| 287 |
+
href="https://huggingface.co/{{ model_id }}" target="_blank">{{ model_id
|
| 288 |
+
}}</a></strong>, showing how each field impacts the completeness score.</p>
|
| 289 |
+
|
| 290 |
+
{% if completeness_score.field_checklist %}
|
| 291 |
+
<!-- Required Fields Category -->
|
| 292 |
+
<div class="category-table">
|
| 293 |
+
<h4>Required Fields Category</h4>
|
| 294 |
+
<table>
|
| 295 |
+
<thead>
|
| 296 |
+
<tr>
|
| 297 |
+
<th>Status</th>
|
| 298 |
+
<th>Field Name</th>
|
| 299 |
+
<th>Actual Location</th>
|
| 300 |
+
<th>Tier</th>
|
| 301 |
+
<th>Type</th>
|
| 302 |
+
</tr>
|
| 303 |
+
</thead>
|
| 304 |
+
<tbody>
|
| 305 |
+
{% set required_fields = ['bomFormat', 'specVersion', 'serialNumber', 'version'] %}
|
| 306 |
+
{% for field in required_fields %}
|
| 307 |
+
<tr>
|
| 308 |
+
<td>
|
| 309 |
+
{% if completeness_score.field_checklist.get(field, '').startswith('✔') %}
|
| 310 |
+
<span class="check-mark">✔</span>
|
| 311 |
+
{% else %}
|
| 312 |
+
<span class="x-mark">✘</span>
|
| 313 |
+
{% endif %}
|
| 314 |
+
</td>
|
| 315 |
+
<td>{{ field }}</td>
|
| 316 |
+
<td>
|
| 317 |
+
{% if completeness_score.field_checklist.get(field, '').startswith('✔') %}
|
| 318 |
+
$.{{ field }}
|
| 319 |
+
{% else %}
|
| 320 |
+
Not found
|
| 321 |
+
{% endif %}
|
| 322 |
+
</td>
|
| 323 |
+
<td><span class="field-tier tier-critical"></span> Critical</td>
|
| 324 |
+
<td>
|
| 325 |
+
{% set f_type = completeness_score.field_types.get(field, 'Unknown') %}
|
| 326 |
+
{% set f_url = completeness_score.reference_urls.get(field, '') if
|
| 327 |
+
completeness_score.reference_urls else '' %}
|
| 328 |
+
{% if f_url %}
|
| 329 |
+
<a href="{{ f_url }}" target="_blank">{{ f_type }}</a>
|
| 330 |
+
{% else %}
|
| 331 |
+
{{ f_type }}
|
| 332 |
+
{% endif %}
|
| 333 |
+
</td>
|
| 334 |
+
</tr>
|
| 335 |
+
{% endfor %}
|
| 336 |
+
</tbody>
|
| 337 |
+
</table>
|
| 338 |
+
<div class="category-result">
|
| 339 |
+
Result: {{ completeness_score.category_details.required_fields.present_fields if
|
| 340 |
+
completeness_score.category_details else 'N/A' }}/{{
|
| 341 |
+
completeness_score.category_details.required_fields.total_fields if
|
| 342 |
+
completeness_score.category_details else 'N/A' }} present
|
| 343 |
+
({{ completeness_score.category_details.required_fields.percentage if
|
| 344 |
+
completeness_score.category_details else 'N/A' }}%) =
|
| 345 |
+
{{ completeness_score.section_scores.required_fields if completeness_score.section_scores
|
| 346 |
+
else 'N/A' }}/20 points
|
| 347 |
+
</div>
|
| 348 |
+
</div>
|
| 349 |
+
|
| 350 |
+
<!-- Metadata Category -->
|
| 351 |
+
<div class="category-table">
|
| 352 |
+
<h4>Metadata Category</h4>
|
| 353 |
+
<table>
|
| 354 |
+
<thead>
|
| 355 |
+
<tr>
|
| 356 |
+
<th>Status</th>
|
| 357 |
+
<th>Field Name</th>
|
| 358 |
+
<th>Actual Location</th>
|
| 359 |
+
<th>Tier</th>
|
| 360 |
+
<th>Type</th>
|
| 361 |
+
</tr>
|
| 362 |
+
</thead>
|
| 363 |
+
<tbody>
|
| 364 |
+
{% set metadata_fields = [
|
| 365 |
+
('primaryPurpose', 'Critical'),
|
| 366 |
+
('suppliedBy', 'Critical'),
|
| 367 |
+
('standardCompliance', 'Supplementary'),
|
| 368 |
+
('domain', 'Supplementary'),
|
| 369 |
+
('autonomyType', 'Supplementary')
|
| 370 |
+
] %}
|
| 371 |
+
{% for field, tier in metadata_fields %}
|
| 372 |
+
<tr>
|
| 373 |
+
<td>
|
| 374 |
+
{% if completeness_score.field_checklist.get(field, '').startswith('✔') %}
|
| 375 |
+
<span class="check-mark">✔</span>
|
| 376 |
+
{% else %}
|
| 377 |
+
<span class="x-mark">✘</span>
|
| 378 |
+
{% endif %}
|
| 379 |
+
</td>
|
| 380 |
+
<td>{{ field }}</td>
|
| 381 |
+
<td>
|
| 382 |
+
{% if completeness_score.field_checklist.get(field, '').startswith('✔') %}
|
| 383 |
+
{% if field == 'primaryPurpose' %}
|
| 384 |
+
$.components[0].modelCard.modelParameters.task
|
| 385 |
+
{% elif field == 'suppliedBy' %}
|
| 386 |
+
$.components[0].supplier.name
|
| 387 |
+
{% else %}
|
| 388 |
+
$.components[0].modelCard.properties[name="{{ field }}"]
|
| 389 |
+
{% endif %}
|
| 390 |
+
{% else %}
|
| 391 |
+
Not found
|
| 392 |
+
{% endif %}
|
| 393 |
+
</td>
|
| 394 |
+
<td><span class="field-tier tier-{{ tier|lower }}"></span> {{ tier }}</td>
|
| 395 |
+
<td>
|
| 396 |
+
{% set f_type = completeness_score.field_types.get(field, 'Unknown') %}
|
| 397 |
+
{% set f_url = completeness_score.reference_urls.get(field, '') if
|
| 398 |
+
completeness_score.reference_urls else '' %}
|
| 399 |
+
{% if f_url %}
|
| 400 |
+
<a href="{{ f_url }}" target="_blank">{{ f_type }}</a>
|
| 401 |
+
{% else %}
|
| 402 |
+
{{ f_type }}
|
| 403 |
+
{% endif %}
|
| 404 |
+
</td>
|
| 405 |
+
</tr>
|
| 406 |
+
{% endfor %}
|
| 407 |
+
</tbody>
|
| 408 |
+
</table>
|
| 409 |
+
<div class="category-result">
|
| 410 |
+
Result: {{ completeness_score.category_details.metadata.present_fields if
|
| 411 |
+
completeness_score.category_details else 'N/A' }}/{{
|
| 412 |
+
completeness_score.category_details.metadata.total_fields if
|
| 413 |
+
completeness_score.category_details else 'N/A' }} present
|
| 414 |
+
({{ completeness_score.category_details.metadata.percentage if
|
| 415 |
+
completeness_score.category_details else 'N/A' }}%) =
|
| 416 |
+
{{ completeness_score.section_scores.metadata if completeness_score.section_scores else
|
| 417 |
+
'N/A' }}/20 points
|
| 418 |
+
</div>
|
| 419 |
+
</div>
|
| 420 |
+
|
| 421 |
+
<!-- Component Basic Category -->
|
| 422 |
+
<div class="category-table">
|
| 423 |
+
<h4>Component Basic Category</h4>
|
| 424 |
+
<table>
|
| 425 |
+
<thead>
|
| 426 |
+
<tr>
|
| 427 |
+
<th>Status</th>
|
| 428 |
+
<th>Field Name</th>
|
| 429 |
+
<th>Actual Location</th>
|
| 430 |
+
<th>Tier</th>
|
| 431 |
+
<th>Type</th>
|
| 432 |
+
</tr>
|
| 433 |
+
</thead>
|
| 434 |
+
<tbody>
|
| 435 |
+
{% set component_basic_fields = [
|
| 436 |
+
('name', 'Critical'),
|
| 437 |
+
('type', 'Critical'),
|
| 438 |
+
('component_version', 'Critical'),
|
| 439 |
+
('purl', 'Important'),
|
| 440 |
+
('description', 'Important'),
|
| 441 |
+
('licenses', 'Important')
|
| 442 |
+
] %}
|
| 443 |
+
{% for field, tier in component_basic_fields %}
|
| 444 |
+
<tr>
|
| 445 |
+
<td>
|
| 446 |
+
{% if completeness_score.field_checklist.get(field, '').startswith('✔') %}
|
| 447 |
+
<span class="check-mark">✔</span>
|
| 448 |
+
{% else %}
|
| 449 |
+
<span class="x-mark">✘</span>
|
| 450 |
+
{% endif %}
|
| 451 |
+
</td>
|
| 452 |
+
<td>{% if field == 'component_version' %}version{% else %}{{ field }}{% endif %}
|
| 453 |
+
</td>
|
| 454 |
+
<td>
|
| 455 |
+
{% if completeness_score.field_checklist.get(field, '').startswith('✔') %}
|
| 456 |
+
{% if field == 'component_version' %}
|
| 457 |
+
$.components[0].version
|
| 458 |
+
{% else %}
|
| 459 |
+
$.components[0].{{ field }}
|
| 460 |
+
{% endif %}
|
| 461 |
+
{% else %}
|
| 462 |
+
{% if field == 'description' %}
|
| 463 |
+
Not found in component level
|
| 464 |
+
{% else %}
|
| 465 |
+
Not found
|
| 466 |
+
{% endif %}
|
| 467 |
+
{% endif %}
|
| 468 |
+
</td>
|
| 469 |
+
<td><span class="field-tier tier-{{ tier|lower }}"></span> {{ tier }}</td>
|
| 470 |
+
<td>
|
| 471 |
+
{% set f_type = completeness_score.field_types.get(field, 'Unknown') %}
|
| 472 |
+
{% set f_url = completeness_score.reference_urls.get(field, '') if
|
| 473 |
+
completeness_score.reference_urls else '' %}
|
| 474 |
+
{% if f_url %}
|
| 475 |
+
<a href="{{ f_url }}" target="_blank">{{ f_type }}</a>
|
| 476 |
+
{% else %}
|
| 477 |
+
{{ f_type }}
|
| 478 |
+
{% endif %}
|
| 479 |
+
</td>
|
| 480 |
+
</tr>
|
| 481 |
+
{% endfor %}
|
| 482 |
+
</tbody>
|
| 483 |
+
</table>
|
| 484 |
+
<div class="category-result">
|
| 485 |
+
Result: {{ completeness_score.category_details.component_basic.present_fields if
|
| 486 |
+
completeness_score.category_details else 'N/A' }}/{{
|
| 487 |
+
completeness_score.category_details.component_basic.total_fields if
|
| 488 |
+
completeness_score.category_details else 'N/A' }} present
|
| 489 |
+
({{ completeness_score.category_details.component_basic.percentage if
|
| 490 |
+
completeness_score.category_details else 'N/A' }}%) =
|
| 491 |
+
{{ completeness_score.section_scores.component_basic if completeness_score.section_scores
|
| 492 |
+
else 'N/A' }}/20 points
|
| 493 |
+
</div>
|
| 494 |
+
</div>
|
| 495 |
+
|
| 496 |
+
<!-- Component Model Card Category -->
|
| 497 |
+
<div class="category-table">
|
| 498 |
+
<h4>Component Model Card Category</h4>
|
| 499 |
+
<table>
|
| 500 |
+
<thead>
|
| 501 |
+
<tr>
|
| 502 |
+
<th>Status</th>
|
| 503 |
+
<th>Field Name</th>
|
| 504 |
+
<th>Actual Location</th>
|
| 505 |
+
<th>Tier</th>
|
| 506 |
+
<th>Type</th>
|
| 507 |
+
</tr>
|
| 508 |
+
</thead>
|
| 509 |
+
<tbody>
|
| 510 |
+
{% set model_card_fields = completeness_score.category_fields_list.component_model_card
|
| 511 |
+
if completeness_score and completeness_score.category_fields_list else [] %}
|
| 512 |
+
{% for field_item in model_card_fields %}
|
| 513 |
+
{% set field = field_item.name %}
|
| 514 |
+
{% set tier = field_item.tier %}
|
| 515 |
+
<tr>
|
| 516 |
+
<td>
|
| 517 |
+
{% if completeness_score.field_checklist.get(field, '').startswith('✔') %}
|
| 518 |
+
<span class="check-mark">✔</span>
|
| 519 |
+
{% else %}
|
| 520 |
+
<span class="x-mark">✘</span>
|
| 521 |
+
{% endif %}
|
| 522 |
+
</td>
|
| 523 |
+
<td>{{ field }}</td>
|
| 524 |
+
<td>
|
| 525 |
+
{% if completeness_score.field_checklist.get(field, '').startswith('✔') %}
|
| 526 |
+
{{ field_item.path }}
|
| 527 |
+
{% else %}
|
| 528 |
+
Not found
|
| 529 |
+
{% endif %}
|
| 530 |
+
</td>
|
| 531 |
+
<td><span class="field-tier tier-{{ tier|lower }}"></span> {{ tier }}</td>
|
| 532 |
+
<td>
|
| 533 |
+
{% set f_type = completeness_score.field_types.get(field, 'Unknown') %}
|
| 534 |
+
{% set f_url = completeness_score.reference_urls.get(field, '') if
|
| 535 |
+
completeness_score.reference_urls else '' %}
|
| 536 |
+
{% if f_url %}
|
| 537 |
+
<a href="{{ f_url }}" target="_blank">{{ f_type }}</a>
|
| 538 |
+
{% else %}
|
| 539 |
+
{{ f_type }}
|
| 540 |
+
{% endif %}
|
| 541 |
+
</td>
|
| 542 |
+
</tr>
|
| 543 |
+
{% endfor %}
|
| 544 |
+
</tbody>
|
| 545 |
+
</table>
|
| 546 |
+
<div class="category-result">
|
| 547 |
+
Result: {{ completeness_score.category_details.component_model_card.present_fields if
|
| 548 |
+
completeness_score.category_details else 'N/A' }}/{{
|
| 549 |
+
completeness_score.category_details.component_model_card.total_fields if
|
| 550 |
+
completeness_score.category_details else 'N/A' }} present
|
| 551 |
+
({{ completeness_score.category_details.component_model_card.percentage if
|
| 552 |
+
completeness_score.category_details else 'N/A' }}%) =
|
| 553 |
+
{{ completeness_score.section_scores.component_model_card if
|
| 554 |
+
completeness_score.section_scores else 'N/A' }}/30 points
|
| 555 |
+
</div>
|
| 556 |
+
</div>
|
| 557 |
+
|
| 558 |
+
<!-- External References Category -->
|
| 559 |
+
<div class="category-table">
|
| 560 |
+
<h4>External References Category</h4>
|
| 561 |
+
<table>
|
| 562 |
+
<thead>
|
| 563 |
+
<tr>
|
| 564 |
+
<th>Status</th>
|
| 565 |
+
<th>Field Name</th>
|
| 566 |
+
<th>Actual Location</th>
|
| 567 |
+
<th>Tier</th>
|
| 568 |
+
<th>Type</th>
|
| 569 |
+
</tr>
|
| 570 |
+
</thead>
|
| 571 |
+
<tbody>
|
| 572 |
+
{% set external_ref_fields = completeness_score.category_fields_list.external_references
|
| 573 |
+
if completeness_score and completeness_score.category_fields_list else [] %}
|
| 574 |
+
{% for field_item in external_ref_fields %}
|
| 575 |
+
{% set field = field_item.name %}
|
| 576 |
+
{% set tier = field_item.tier %}
|
| 577 |
+
<tr>
|
| 578 |
+
<td>
|
| 579 |
+
{% if completeness_score.field_checklist.get(field, '').startswith('✔') %}
|
| 580 |
+
<span class="check-mark">✔</span>
|
| 581 |
+
{% else %}
|
| 582 |
+
<span class="x-mark">✘</span>
|
| 583 |
+
{% endif %}
|
| 584 |
+
</td>
|
| 585 |
+
<td>{{ field }}</td>
|
| 586 |
+
<td>
|
| 587 |
+
{% if completeness_score.field_checklist.get(field, '').startswith('✔') %}
|
| 588 |
+
{{ field_item.path }}
|
| 589 |
+
{% else %}
|
| 590 |
+
Not found
|
| 591 |
+
{% endif %}
|
| 592 |
+
</td>
|
| 593 |
+
<td><span class="field-tier tier-{{ tier|lower }}"></span> {{ tier }}</td>
|
| 594 |
+
<td>
|
| 595 |
+
{% set f_type = completeness_score.field_types.get(field, 'Unknown') %}
|
| 596 |
+
{% set f_url = completeness_score.reference_urls.get(field, '') if
|
| 597 |
+
completeness_score.reference_urls else '' %}
|
| 598 |
+
{% if f_url %}
|
| 599 |
+
<a href="{{ f_url }}" target="_blank">{{ f_type }}</a>
|
| 600 |
+
{% else %}
|
| 601 |
+
{{ f_type }}
|
| 602 |
+
{% endif %}
|
| 603 |
+
</td>
|
| 604 |
+
</tr>
|
| 605 |
+
{% endfor %}
|
| 606 |
+
</tbody>
|
| 607 |
+
</table>
|
| 608 |
+
<div class="category-result">
|
| 609 |
+
Result: {{ completeness_score.category_details.external_references.present_fields if
|
| 610 |
+
completeness_score.category_details else 'N/A' }}/{{
|
| 611 |
+
completeness_score.category_details.external_references.total_fields if
|
| 612 |
+
completeness_score.category_details else 'N/A' }} present
|
| 613 |
+
({{ completeness_score.category_details.external_references.percentage if
|
| 614 |
+
completeness_score.category_details else 'N/A' }}%) =
|
| 615 |
+
{{ completeness_score.section_scores.external_references if
|
| 616 |
+
completeness_score.section_scores else 'N/A' }}/10 points
|
| 617 |
+
</div>
|
| 618 |
+
</div>
|
| 619 |
+
|
| 620 |
+
{% else %}
|
| 621 |
+
<p>Field checklist data not available.</p>
|
| 622 |
+
{% endif %}
|
| 623 |
+
</div>
|
| 624 |
+
</div>
|
| 625 |
+
|
| 626 |
+
<!-- Score Report Tab -->
|
| 627 |
+
<div id="score-view" class="tab-content">
|
| 628 |
+
<div class="content-section">
|
| 629 |
+
<h3>📊 Completeness Score Report</h3>
|
| 630 |
+
|
| 631 |
+
<!-- Total Score Display -->
|
| 632 |
+
<div class="total-score-container">
|
| 633 |
+
<div class="total-score">{{ (completeness_score.total_score if completeness_score.total_score !=
|
| 634 |
+
"Undefined" else 0)|round(1) }}/100</div>
|
| 635 |
+
<div class="total-progress">
|
| 636 |
+
<div class="progress-container">
|
| 637 |
+
<div class="progress-bar {{ score_class }}"
|
| 638 |
+
style="width: {{ score_percent|round|int }}%">
|
| 639 |
+
{{ score_percent|int }}% {{ score_label }}
|
| 640 |
+
</div>
|
| 641 |
+
</div>
|
| 642 |
+
</div>
|
| 643 |
+
</div>
|
| 644 |
+
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
<!-- Specific Breakdown for This SBOM -->
|
| 648 |
+
<div class="note-box">
|
| 649 |
+
<h4>Your AIBOM Breakdown</h4>
|
| 650 |
+
<p><strong>Model:</strong> <a href="https://huggingface.co/{{ model_id }}" target="_blank">{{
|
| 651 |
+
model_id }}</a></p>
|
| 652 |
+
|
| 653 |
+
<table class="score-table">
|
| 654 |
+
<thead>
|
| 655 |
+
<tr>
|
| 656 |
+
<th>Category</th>
|
| 657 |
+
<th>Fields Present</th>
|
| 658 |
+
<th>Score</th>
|
| 659 |
+
<th>Progress</th>
|
| 660 |
+
</tr>
|
| 661 |
+
</thead>
|
| 662 |
+
<tbody>
|
| 663 |
+
{% if completeness_score.category_details and completeness_score.section_scores %}
|
| 664 |
+
{% set categories = [
|
| 665 |
+
('Required Fields', 'required_fields', 20),
|
| 666 |
+
('Metadata', 'metadata', 20),
|
| 667 |
+
('Component Basic', 'component_basic', 20),
|
| 668 |
+
('Model Card', 'component_model_card', 30),
|
| 669 |
+
('External References', 'external_references', 10)
|
| 670 |
+
] %}
|
| 671 |
+
{% for display_name, key, max_score in categories %}
|
| 672 |
+
<tr>
|
| 673 |
+
<td>{{ display_name }}</td>
|
| 674 |
+
<td>{{ completeness_score.category_details[key].present_fields }}/{{
|
| 675 |
+
completeness_score.category_details[key].total_fields }}</td>
|
| 676 |
+
<td>{{ completeness_score.section_scores[key]|round(1) }}/{{ max_score }}</td>
|
| 677 |
+
<td>
|
| 678 |
+
<div class="progress-container">
|
| 679 |
+
{% set percentage = completeness_score.category_details[key].percentage %}
|
| 680 |
+
{% if percentage >= 80 %}
|
| 681 |
+
{% set progress_class = "progress-excellent" %}
|
| 682 |
+
{% elif percentage >= 60 %}
|
| 683 |
+
{% set progress_class = "progress-good" %}
|
| 684 |
+
{% elif percentage >= 40 %}
|
| 685 |
+
{% set progress_class = "progress-fair" %}
|
| 686 |
+
{% else %}
|
| 687 |
+
{% set progress_class = "progress-poor" %}
|
| 688 |
+
{% endif %}
|
| 689 |
+
<div class="progress-bar {{ progress_class }}"
|
| 690 |
+
style="width: {{ percentage|round|int }}%">{{ percentage|round|int }}%
|
| 691 |
+
</div>
|
| 692 |
+
</div>
|
| 693 |
+
</td>
|
| 694 |
+
</tr>
|
| 695 |
+
{% endfor %}
|
| 696 |
+
{% else %}
|
| 697 |
+
<tr>
|
| 698 |
+
<td colspan="4">Breakdown data not available</td>
|
| 699 |
+
</tr>
|
| 700 |
+
{% endif %}
|
| 701 |
+
</tbody>
|
| 702 |
+
</table>
|
| 703 |
+
</div>
|
| 704 |
+
|
| 705 |
+
<p><strong>Calculation:</strong></p>
|
| 706 |
+
<p>Subtotal:
|
| 707 |
+
{% if completeness_score.section_scores %}
|
| 708 |
+
{% for category, score in completeness_score.section_scores.items() %}
|
| 709 |
+
{{ score|round(1) }}{% if not loop.last %} + {% endif %}
|
| 710 |
+
{% endfor %}
|
| 711 |
+
= <strong>{{ completeness_score.subtotal_score|round(1) }}/100</strong>
|
| 712 |
+
{% else %}
|
| 713 |
+
<strong>{{ completeness_score.subtotal_score|round(1) }}/100</strong>
|
| 714 |
+
{% endif %}
|
| 715 |
+
</p>
|
| 716 |
+
|
| 717 |
+
{% if completeness_score.penalty_applied %}
|
| 718 |
+
<p>Penalty Applied: <strong>-{{ completeness_score.penalty_percentage }}%</strong> ({{
|
| 719 |
+
completeness_score.penalty_reason }})</p>
|
| 720 |
+
<p>Final Score: {{ completeness_score.subtotal_score|round(1) }} × {{
|
| 721 |
+
completeness_score.penalty_factor }} = <strong>{{ completeness_score.total_score|round(1)
|
| 722 |
+
}}/100</strong></p>
|
| 723 |
+
{% else %}
|
| 724 |
+
<p>No penalties applied</p>
|
| 725 |
+
<p>Final Score: <strong>{{ completeness_score.total_score|round(1) }}/100</strong></p>
|
| 726 |
+
{% endif %}
|
| 727 |
+
</div>
|
| 728 |
+
|
| 729 |
+
<!-- Missing Fields Analysis -->
|
| 730 |
+
{% if completeness_score.missing_counts %}
|
| 731 |
+
<div class="missing-fields">
|
| 732 |
+
<h4>Missing Fields Summary</h4>
|
| 733 |
+
<ul>
|
| 734 |
+
<li><strong>Critical:</strong> {{ completeness_score.missing_counts.critical }} missing</li>
|
| 735 |
+
<li><strong>Important:</strong> {{ completeness_score.missing_counts.important }} missing</li>
|
| 736 |
+
<li><strong>Supplementary:</strong> {{ completeness_score.missing_counts.supplementary }}
|
| 737 |
+
missing</li>
|
| 738 |
+
</ul>
|
| 739 |
+
|
| 740 |
+
{% if completeness_score.missing_counts.important >= 5 %}
|
| 741 |
+
<p><strong>Impact:</strong> Missing multiple critical and/or important fields will incur penalties
|
| 742 |
+
according to the Penalty Structure.</p>
|
| 743 |
+
{% endif %}
|
| 744 |
+
</div>
|
| 745 |
+
{% endif %}
|
| 746 |
+
|
| 747 |
+
<!-- Recommendations -->
|
| 748 |
+
{% if completeness_score.recommendations %}
|
| 749 |
+
<div class="recommendations">
|
| 750 |
+
<h4>General Recommendations to Improve AIBOM Completeness</h4>
|
| 751 |
+
<ul>
|
| 752 |
+
<li><strong>Required Fields:</strong> Ensure the model is published with a clear name, version,
|
| 753 |
+
and hosting platform information to allow proper SBOM structuring.</li>
|
| 754 |
+
<li><strong>Metadata:</strong> Include author or organization name, purpose of the model, and
|
| 755 |
+
relevant timestamps in the model repository or card.</li>
|
| 756 |
+
<li><strong>Component Basic:</strong> Provide a descriptive model title, a meaningful
|
| 757 |
+
description, a valid license, and a consistent version reference (e.g., tags or commits).
|
| 758 |
+
</li>
|
| 759 |
+
<li><strong>Model Card:</strong> Fill out structured sections for model parameters, evaluation
|
| 760 |
+
metrics, limitations, and ethical considerations to enable full transparency.</li>
|
| 761 |
+
<li><strong>External References:</strong> Add links to source code, datasets, documentation, and
|
| 762 |
+
versioned download locations to support traceability and reproducibility.</li>
|
| 763 |
+
</ul>
|
| 764 |
+
</div>
|
| 765 |
+
|
| 766 |
+
<!-- Generic Scoring Explanation -->
|
| 767 |
+
<div class="scoring-rubric">
|
| 768 |
+
<h4>How AIBOM Completeness is Scored</h4>
|
| 769 |
+
<p>The completeness score evaluates how well your AIBOM documents the model across five key
|
| 770 |
+
categories:</p>
|
| 771 |
+
<ul>
|
| 772 |
+
<li><strong>Required Fields ({{ completeness_score.category_details.required_fields.max_points
|
| 773 |
+
if completeness_score.category_details else 'N/A' }} points):</strong> Basic SBOM
|
| 774 |
+
structure mandated by CycloneDX
|
| 775 |
+
</li>
|
| 776 |
+
<li><strong>Metadata ({{ completeness_score.category_details.metadata.max_points if
|
| 777 |
+
completeness_score.category_details else 'N/A' }} points):</strong> Information about
|
| 778 |
+
the SBOM generation and model
|
| 779 |
+
purpose</li>
|
| 780 |
+
<li><strong>Component Basic ({{ completeness_score.category_details.component_basic.max_points
|
| 781 |
+
if completeness_score.category_details else 'N/A' }} points):</strong> Essential model
|
| 782 |
+
identification and licensing
|
| 783 |
+
</li>
|
| 784 |
+
<li><strong>Model Card ({{ completeness_score.category_details.component_model_card.max_points
|
| 785 |
+
if completeness_score.category_details else 'N/A' }} points):</strong> Detailed
|
| 786 |
+
AI-specific documentation for transparency
|
| 787 |
+
</li>
|
| 788 |
+
<li><strong>External References ({{
|
| 789 |
+
completeness_score.category_details.external_references.max_points if
|
| 790 |
+
completeness_score.category_details else 'N/A' }} points):</strong> Links to model
|
| 791 |
+
resources and documentation
|
| 792 |
+
</li>
|
| 793 |
+
</ul>
|
| 794 |
+
|
| 795 |
+
<p><strong>Calculation Method:</strong></p>
|
| 796 |
+
<p>Each category score = (Present Fields ÷ Total Fields) × Maximum Points</p>
|
| 797 |
+
<p>Subtotal = Sum of all category scores</p>
|
| 798 |
+
<p>Final Score = Subtotal × Penalty Factor (if applicable)</p>
|
| 799 |
+
<h4>Penalty Structure:</h4>
|
| 800 |
+
<p><strong>Critical Fields Missing:</strong></p>
|
| 801 |
+
<ul>
|
| 802 |
+
<li>0-1 missing: No penalty</li>
|
| 803 |
+
<li>2-3 missing: 10% penalty (×0.9)</li>
|
| 804 |
+
<li>4+ missing: 20% penalty (×0.8)</li>
|
| 805 |
+
</ul>
|
| 806 |
+
|
| 807 |
+
<p><strong>Important Fields Missing:</strong></p>
|
| 808 |
+
<ul>
|
| 809 |
+
<li>0-4 missing: No penalty</li>
|
| 810 |
+
<li>5+ missing: 5% penalty (×0.95)</li>
|
| 811 |
+
</ul>
|
| 812 |
+
|
| 813 |
+
<p><strong>Note:</strong> Penalties are cumulative and applied to the subtotal. For example, if you
|
| 814 |
+
have 3 critical fields missing AND 5 important fields missing, both penalties apply: Subtotal ×
|
| 815 |
+
0.9 × 0.95 = Final Score.</p>
|
| 816 |
+
|
| 817 |
+
</div>
|
| 818 |
+
{% endif %}
|
| 819 |
+
</div>
|
| 820 |
+
</div>
|
| 821 |
+
|
| 822 |
+
<!-- JSON View Tab -->
|
| 823 |
+
<div id="json-view" class="tab-content">
|
| 824 |
+
<div class="content-section">
|
| 825 |
+
<h3>📄 Raw JSON View</h3>
|
| 826 |
+
<p>This is the complete AIBOM components array in CycloneDX JSON format:</p>
|
| 827 |
+
<div class="json-view">
|
| 828 |
+
<pre>{{ components_json }}</pre>
|
| 829 |
+
</div>
|
| 830 |
+
</div>
|
| 831 |
+
</div>
|
| 832 |
+
|
| 833 |
+
<!-- Modern Footer -->
|
| 834 |
+
{% include 'includes/footer.html' %}
|
| 835 |
+
</div>
|
| 836 |
+
|
| 837 |
+
<script>
|
| 838 |
+
const AIBOM_CDX_JSON_1_6 = {{ aibom_cdx_json_1_6 | safe }};
|
| 839 |
+
const AIBOM_CDX_JSON_1_7 = {{ aibom_cdx_json_1_7 | safe }};
|
| 840 |
+
const FILENAME_BASE = "{{ model_id|replace('/', '_') }}";
|
| 841 |
+
</script>
|
| 842 |
+
<script src="{{ static_root|default('/static') }}/js/script.js"></script>
|
| 843 |
+
</body>
|
| 844 |
+
|
| 845 |
+
</html>
|
src/utils/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .rate_limiting import RateLimitMiddleware, ConcurrencyLimitMiddleware, RequestSizeLimitMiddleware
|
| 2 |
+
from .captcha import verify_recaptcha
|
| 3 |
+
from .cleanup_utils import perform_cleanup
|
src/utils/analytics.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import logging
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from datasets import Dataset, load_dataset, concatenate_datasets
|
| 5 |
+
from ..config import HF_REPO, HF_TOKEN
|
| 6 |
+
|
| 7 |
+
logger = logging.getLogger(__name__)
|
| 8 |
+
|
| 9 |
+
def log_sbom_generation(model_id: str) -> None:
    """Log a successful SBOM generation event to the Hugging Face dataset.

    Fire-and-forget: the upload runs on the running asyncio loop's default
    executor when one exists (async web context), otherwise on a fresh
    worker thread (sync contexts such as the CLI), so the caller is never
    blocked and never sees an exception from analytics.

    Args:
        model_id: Hugging Face model identifier (e.g. "org/model").
    """
    if not HF_TOKEN:
        logger.warning("HF_TOKEN not set. Skipping SBOM generation logging.")
        return

    try:
        import asyncio
        from concurrent.futures import ThreadPoolExecutor
        from datetime import timezone

        def _push_log():
            """Synchronous upload task; must never propagate exceptions."""
            try:
                # Naive UTC timestamp: datetime.utcnow() is deprecated, so use
                # an aware "now" and strip the tzinfo to keep the ISO format
                # byte-identical to rows already stored in the dataset.
                ts = datetime.now(timezone.utc).replace(tzinfo=None).isoformat()
                ds_new_log = Dataset.from_dict({
                    "timestamp": [ts],
                    "event": ["generated"],
                    "model_id": [model_id],
                })

                # Try to append to the existing dataset; on any load failure
                # fall back to pushing just the new row so analytics never
                # crashes the main thread.
                try:
                    existing_ds = load_dataset(
                        HF_REPO, token=HF_TOKEN, split='train', trust_remote_code=True
                    )
                    if len(existing_ds) > 0:
                        ds_to_push = concatenate_datasets([existing_ds, ds_new_log])
                    else:
                        ds_to_push = ds_new_log
                except Exception as load_err:
                    logger.info(f"Could not load existing dataset: {load_err}. Creating new.")
                    ds_to_push = ds_new_log

                ds_to_push.push_to_hub(HF_REPO, token=HF_TOKEN, private=True)
                logger.info(f"Successfully logged SBOM generation for {model_id}")
            except Exception as e:
                logger.error(f"Background analytics failed: {e}")

        # Use the existing event loop's executor if one is running.
        loop = None
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            pass  # No running loop: we are in a synchronous context.

        if loop and loop.is_running():
            loop.run_in_executor(None, _push_log)
        else:
            # Fallback for sync contexts (like CLI).
            ThreadPoolExecutor(max_workers=1).submit(_push_log)

    except Exception as e:
        logger.error(f"Failed to initiate analytics logging: {e}")
|
| 66 |
+
|
| 67 |
+
def get_sbom_count() -> str:
    """Return the total number of generated SBOM records as a formatted string.

    Loads the analytics dataset from the Hugging Face Hub and counts its
    rows. Returns "N/A" when no token is configured or the dataset cannot
    be loaded.
    """
    if HF_TOKEN:
        try:
            dataset = load_dataset(HF_REPO, token=HF_TOKEN, split='train', trust_remote_code=True)
            # Thousands-separated count for display purposes.
            return f"{len(dataset):,}"
        except Exception as e:
            logger.error(f"Failed to retrieve SBOM count: {e}")
    return "N/A"
|
src/utils/captcha.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import requests
|
| 3 |
+
import logging
|
| 4 |
+
from typing import Optional
|
| 5 |
+
|
| 6 |
+
logger = logging.getLogger(__name__ )
|
| 7 |
+
|
| 8 |
+
# Get the secret key from environment variable
|
| 9 |
+
RECAPTCHA_SECRET_KEY = os.environ.get("RECAPTCHA_SECRET_KEY")
|
| 10 |
+
|
| 11 |
+
def verify_recaptcha(response_token: Optional[str]) -> bool:
    """Verify a Google reCAPTCHA response token against the siteverify API.

    Returns True when verification succeeds, or when no secret key is
    configured (verification is deliberately bypassed in that case).
    Returns False for a missing token, a failed verification, or any
    request/parse error.
    """
    # Fix: the original conditional f-string logged the bare message "None"
    # when no token was present, which was confusing in the logs.
    if response_token:
        logger.info(f"Starting reCAPTCHA verification with token: {response_token[:10]}...")
    else:
        logger.info("Starting reCAPTCHA verification with no token provided")

    # Read the secret at call time so environment changes take effect without
    # a restart (the module-level RECAPTCHA_SECRET_KEY constant is kept for
    # backward compatibility with any external readers).
    secret_key = os.environ.get("RECAPTCHA_SECRET_KEY")
    if not secret_key:
        logger.warning("RECAPTCHA_SECRET_KEY not set, bypassing verification")
        return True
    logger.info("RECAPTCHA_SECRET_KEY is set (not showing for security)")

    # If no token provided, verification fails
    if not response_token:
        logger.warning("No reCAPTCHA response token provided")
        return False

    try:
        logger.info("Sending verification request to Google reCAPTCHA API")
        verification_response = requests.post(
            "https://www.google.com/recaptcha/api/siteverify",
            data={
                "secret": secret_key,
                "response": response_token
            },
            # Fix: without a timeout an unreachable Google endpoint would hang
            # the request path indefinitely.
            timeout=10,
        )

        result = verification_response.json()
        logger.info(f"reCAPTCHA verification result: {result}")

        if result.get("success"):
            logger.info("reCAPTCHA verification successful")
            return True

        # Surface Google's specific error codes for debugging.
        logger.warning(f"reCAPTCHA verification failed: {result.get('error-codes', [])}")
        return False
    except Exception as e:
        # Network errors, non-JSON responses, etc. all fail closed.
        logger.error(f"Error verifying reCAPTCHA: {str(e)}")
        return False
|
| 55 |
+
|
src/utils/cleanup_utils.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import logging
|
| 3 |
+
from datetime import datetime, timedelta
|
| 4 |
+
|
| 5 |
+
logger = logging.getLogger(__name__)
|
| 6 |
+
|
| 7 |
+
def cleanup_old_files(directory, max_age_days=7):
    """
    Delete files in *directory* whose modification time is older than
    *max_age_days* days.

    Uses os.scandir so each entry's stat result is obtained cheaply.
    Returns the number of files removed (0 if the directory is missing or
    an unexpected error occurs).
    """
    if not os.path.exists(directory):
        logger.warning(f"Directory does not exist: {directory}")
        return 0

    removed_count = 0
    cutoff_time = datetime.now() - timedelta(days=max_age_days)

    try:
        with os.scandir(directory) as dir_iter:
            for item in dir_iter:
                if not item.is_file():
                    continue
                try:
                    # entry.stat() is cached by scandir; cheaper than getmtime.
                    modified_at = datetime.fromtimestamp(item.stat().st_mtime)
                    if modified_at < cutoff_time:
                        os.remove(item.path)
                        removed_count += 1
                        logger.info(f"Removed old file: {item.path}")
                except OSError as e:
                    # Per-file failures must not abort the whole sweep.
                    logger.error(f"Error accessing/removing file {item.path}: {e}")

        if removed_count > 0:
            logger.info(f"Cleanup completed: removed {removed_count} files older than {max_age_days} days from {directory}")
        return removed_count
    except Exception as e:
        logger.error(f"Error during cleanup of directory {directory}: {e}")
        return 0
|
| 40 |
+
|
| 41 |
+
def limit_file_count(directory, max_files=1000):
    """
    Keep at most *max_files* files in *directory*, deleting the oldest
    (by modification time) first.

    Returns the number of files removed (0 when within the limit, the
    directory is missing, or an unexpected error occurs).
    """
    if not os.path.exists(directory):
        logger.warning(f"Directory does not exist: {directory}")
        return 0

    try:
        with os.scandir(directory) as dir_iter:
            files = [(entry.path, entry.stat().st_mtime) for entry in dir_iter if entry.is_file()]

        # Within limits: nothing to do.
        if len(files) <= max_files:
            return 0

        # Oldest first, so the slice below selects the excess oldest files.
        files.sort(key=lambda pair: pair[1])

        removed_count = 0
        for file_path, _mtime in files[:-max_files]:
            try:
                os.remove(file_path)
                removed_count += 1
                logger.info(f"Removed excess file: {file_path}")
            except OSError as e:
                logger.error(f"Error removing file {file_path}: {e}")

        logger.info(f"File count limit enforced: removed {removed_count} oldest files, keeping max {max_files}")
        return removed_count
    except Exception as e:
        logger.error(f"Error during file count limiting in directory {directory}: {e}")
        return 0
|
| 81 |
+
|
| 82 |
+
def perform_cleanup(directory, max_age_days=7, max_files=1000):
    """Run age-based cleanup, then count-based cleanup; return total files removed."""
    removed_by_age = cleanup_old_files(directory, max_age_days)
    removed_by_count = limit_file_count(directory, max_files)
    return removed_by_age + removed_by_count
|
src/utils/formatter.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import copy
|
| 3 |
+
from typing import Dict, Any
|
| 4 |
+
|
| 5 |
+
def export_aibom(aibom: Dict[str, Any], bom_type: str = "cyclonedx", spec_version: str = "1.6") -> str:
    """
    Serialize the internal AIBOM object into the requested BOM format and
    specification version, returning an indented JSON string.

    The input dict is never mutated: a deep copy is stamped with the
    format fields before serialization.
    """
    stamped = copy.deepcopy(aibom)
    fmt = bom_type.lower()

    if fmt == "cyclonedx":
        # CycloneDX-specific mappings or adjustments can be added here over time.
        stamped["bomFormat"] = "CycloneDX"
        stamped["specVersion"] = spec_version
    elif fmt == "spdx":
        # SPDX mapping to the AIBOM isn't fully built yet; this branch is the
        # routing hook for future generation logic.
        stamped["bomFormat"] = "SPDX"
        stamped["specVersion"] = spec_version

    return json.dumps(stamped, indent=2)
|
src/utils/license_utils.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
License utility functions for normalising and verifying SPDX license IDs.
|
| 3 |
+
"""
|
| 4 |
+
import logging
|
| 5 |
+
from typing import Optional, Dict
|
| 6 |
+
|
| 7 |
+
logger = logging.getLogger(__name__)
|
| 8 |
+
|
| 9 |
+
# Reference URLs for known license identifiers. Keys are canonical SPDX IDs,
# plus a few vendor-specific identifiers (e.g. NVIDIA's open model license)
# that are not part of the SPDX list.
LICENSE_URLS: Dict[str, str] = {
    "Apache-2.0": "https://www.apache.org/licenses/LICENSE-2.0.txt",
    "MIT": "https://opensource.org/licenses/MIT",
    "BSD-3-Clause": "https://opensource.org/licenses/BSD-3-Clause",
    "BSD-2-Clause": "https://opensource.org/licenses/BSD-2-Clause",
    "GPL-3.0-only": "https://www.gnu.org/licenses/gpl-3.0.txt",
    "GPL-2.0-only": "https://www.gnu.org/licenses/gpl-2.0.txt",
    "LGPL-3.0-only": "https://www.gnu.org/licenses/lgpl-3.0.txt",
    "CC-BY-4.0": "https://creativecommons.org/licenses/by/4.0/legalcode",
    "CC-BY-SA-4.0": "https://creativecommons.org/licenses/by-sa/4.0/legalcode",
    "CC-BY-NC-4.0": "https://creativecommons.org/licenses/by-nc/4.0/legalcode",
    "CC-BY-ND-4.0": "https://creativecommons.org/licenses/by-nd/4.0/legalcode",
    "CC-BY-NC-SA-4.0": "https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode",
    "CC-BY-NC-ND-4.0": "https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode",
    "CC0-1.0": "https://creativecommons.org/publicdomain/zero/1.0/legalcode",
    "MPL-2.0": "https://www.mozilla.org/en-US/MPL/2.0/",
    "Unlicense": "https://unlicense.org/",
    "nvidia-open-model-license": "https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/",
}

# Lowercase aliases / common free-text variations mapped to the identifiers
# used as LICENSE_URLS keys. Lookups are done against the lowercased input.
LICENSE_MAPPING: Dict[str, str] = {
    "apache license 2.0": "Apache-2.0",
    "apache-2.0": "Apache-2.0",
    "mit": "MIT",
    "mit license": "MIT",
    "bsd-3-clause": "BSD-3-Clause",
    "cc-by-4.0": "CC-BY-4.0",
    "cc-by-nc-4.0": "CC-BY-NC-4.0",
    "cc0-1.0": "CC0-1.0",
    "gpl-3.0": "GPL-3.0-only",
    "nvidia open model license agreement": "nvidia-open-model-license",
    # Add more as needed
}
|
| 44 |
+
|
| 45 |
+
def normalize_license_id(license_id: str) -> Optional[str]:
    """
    Map a raw license string to a valid SPDX identifier where possible.

    Resolution order: exact known ID, lowercase alias mapping,
    case-insensitive match against known IDs, then a heuristic that passes
    short, space-free strings through unchanged. Returns None when no
    plausible identifier is found.
    """
    if not license_id:
        return None

    # Exact match against the known-ID table.
    if license_id in LICENSE_URLS:
        return license_id

    lowered = license_id.lower()

    # Alias table (keys are lowercase).
    mapped = LICENSE_MAPPING.get(lowered)
    if mapped is not None:
        return mapped

    # Case-insensitive match against the known-ID table.
    for known_id in LICENSE_URLS:
        if known_id.lower() == lowered:
            return known_id

    # Heuristic: short, space-free strings may already be valid SPDX IDs;
    # pass them through and rely on downstream validation warnings.
    if " " not in license_id and len(license_id) < 50:
        return license_id

    return None
|
| 74 |
+
|
| 75 |
+
def get_license_url(license_id: str, fallback: bool = True) -> Optional[str]:
    """Return a reference URL for *license_id*.

    Looks up the known-URL table exactly, then case-insensitively. When
    *fallback* is True, unknown IDs resolve to their spdx.org page;
    otherwise None is returned for unknown IDs.
    """
    direct = LICENSE_URLS.get(license_id)
    if direct is not None:
        return direct

    # Case-insensitive fallback against the known table.
    lowered = license_id.lower()
    for known_id, known_url in LICENSE_URLS.items():
        if known_id.lower() == lowered:
            return known_url

    if fallback:
        return f"https://spdx.org/licenses/{license_id}.html"
    return None
|
| 89 |
+
|
| 90 |
+
# Lazily-initialized license_expression "licensing" object, shared across
# calls so the SPDX license index is only built once per process.
_licensing = None

def is_valid_spdx_license_id(license_id: str) -> bool:
    """Return True if *license_id* is a single valid SPDX license ID.

    Compound expressions ("MIT OR Apache-2.0") are rejected: CycloneDX's
    license 'id' field must hold one SPDX ID from the enum (compound
    expressions belong in the separate 'expression' field). Returns True
    unconditionally when the license-expression library is unavailable.
    """
    global _licensing
    try:
        from license_expression import get_spdx_licensing
        if _licensing is None:
            _licensing = get_spdx_licensing()

        # First make sure the string is a valid SPDX expression at all.
        res = _licensing.validate(license_id)
        if len(res.errors) > 0:
            return False

        # Then parse it to distinguish a single license symbol from a
        # compound AND/OR/WITH expression.
        parsed = _licensing.parse(license_id)

        # Duck-typed check on license-expression internals: a simple license
        # parses to a LicenseSymbol exposing `key` and no `children`, while
        # compound expressions carry `children`.
        # NOTE(review): this relies on the library's object layout — confirm
        # it still holds when upgrading license-expression.
        return hasattr(parsed, "key") and not hasattr(parsed, "children")
    except ImportError:
        # Optional dependency missing: skip validation rather than block.
        logger.warning("license-expression library not found, skipping validation")
        return True
    except Exception as e:
        logger.debug(f"License validation error: {e}")
        return False
|
src/utils/rate_limiting.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
from collections import defaultdict
|
| 3 |
+
from fastapi import Request
|
| 4 |
+
from fastapi.responses import JSONResponse
|
| 5 |
+
from starlette.middleware.base import BaseHTTPMiddleware
|
| 6 |
+
import logging
|
| 7 |
+
import asyncio # Concurrency limiting
|
| 8 |
+
|
| 9 |
+
logger = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
class RateLimitMiddleware(BaseHTTPMiddleware):
    """Per-IP sliding-window rate limiter for selected routes.

    State lives in process memory (a dict of IP -> request timestamps), so
    limits are per worker process. For multi-process deployments a shared
    store (e.g. Redis) would be required.
    """

    def __init__(
        self,
        app,
        rate_limit_per_minute=10,
        rate_limit_window=60,
        protected_routes=None
    ):
        super().__init__(app)
        self.rate_limit_per_minute = rate_limit_per_minute
        self.rate_limit_window = rate_limit_window
        # Fix: avoid a mutable default argument; None selects the standard set.
        if protected_routes is None:
            protected_routes = ["/generate", "/api/generate", "/api/generate-with-report"]
        self.protected_routes = protected_routes
        self.ip_requests = defaultdict(list)
        logger.info(f"Rate limit middleware initialized: {rate_limit_per_minute} requests per {rate_limit_window}s")

    async def dispatch(self, request: Request, call_next):
        # Fix: request.client can be None (e.g. some test clients / unix
        # sockets); the original would raise AttributeError here.
        client_ip = request.client.host if request.client else "unknown"
        current_time = time.time()

        # Only apply rate limiting to protected routes
        if any(request.url.path.startswith(route) for route in self.protected_routes):
            # Drop timestamps for this IP that have fallen out of the window
            self.ip_requests[client_ip] = [t for t in self.ip_requests[client_ip]
                                           if current_time - t < self.rate_limit_window]

            # Periodic cleanup of all IPs (roughly 1 in 100 requests once the
            # table is large, to bound memory without per-request overhead).
            # In a production app, use a background task or Redis.
            if len(self.ip_requests) > 1000 and hash(client_ip) % 100 == 0:
                self._cleanup_all_ips(current_time)

            # Check if rate limit exceeded
            if len(self.ip_requests[client_ip]) >= self.rate_limit_per_minute:
                logger.warning(f"Rate limit exceeded for IP {client_ip} on {request.url.path}")
                return JSONResponse(
                    status_code=429,
                    content={"detail": "Rate limit exceeded. Please try again later."}
                )

            # Record this request's timestamp against the client IP
            self.ip_requests[client_ip].append(current_time)

        # Process the request
        return await call_next(request)

    def _cleanup_all_ips(self, current_time):
        """Remove IPs whose most recent request is outside the window."""
        stale = [
            ip for ip, timestamps in self.ip_requests.items()
            if not timestamps or (current_time - timestamps[-1] > self.rate_limit_window)
        ]
        for ip in stale:
            del self.ip_requests[ip]
|
| 65 |
+
|
| 66 |
+
class ConcurrencyLimitMiddleware(BaseHTTPMiddleware):
    """Caps the number of simultaneous in-flight requests on heavy routes.

    Requests beyond the limit wait up to *timeout* seconds for a slot and
    then receive a 503 response. Non-protected routes pass through freely.
    """

    def __init__(
        self,
        app,
        max_concurrent_requests=5,
        timeout=5.0,
        protected_routes=None
    ):
        super().__init__(app)
        # One semaphore slot per allowed concurrent request.
        self.semaphore = asyncio.Semaphore(max_concurrent_requests)
        self.timeout = timeout
        self.protected_routes = protected_routes or ["/generate", "/api/generate", "/api/generate-with-report"]
        logger.info(f"Concurrency limit middleware initialized: {max_concurrent_requests} concurrent requests")

    async def dispatch(self, request, call_next):
        try:
            # Only apply to protected routes
            if any(request.url.path.startswith(route) for route in self.protected_routes):
                try:
                    # Try to acquire the semaphore
                    acquired = False
                    try:
                        # Use wait_for instead of timeout context manager for compatibility
                        await asyncio.wait_for(self.semaphore.acquire(), timeout=self.timeout)
                        acquired = True
                        return await call_next(request)
                    finally:
                        # Release only if we actually acquired the slot:
                        # wait_for may have raised TimeoutError before acquire
                        # completed, in which case there is nothing to release.
                        if acquired:
                            self.semaphore.release()
                except asyncio.TimeoutError:
                    # Timed out waiting for a free slot: report capacity.
                    logger.warning(f"Concurrency limit reached for {request.url.path}")
                    return JSONResponse(
                        status_code=503,
                        content={"detail": "Server is at capacity. Please try again later."}
                    )
            else:
                # For non-protected routes, proceed normally
                return await call_next(request)
        except Exception as e:
            # Last-resort guard so a middleware bug never crashes the server.
            logger.error(f"Error in ConcurrencyLimitMiddleware: {str(e)}")
            return JSONResponse(
                status_code=500,
                content={"detail": f"Internal server error in middleware: {str(e)}"}
            )
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
# Protection against large request payloads
class RequestSizeLimitMiddleware(BaseHTTPMiddleware):
    """Rejects requests whose declared Content-Length exceeds a maximum.

    Note: this trusts the Content-Length header; it does not meter the
    actual body stream.
    """

    def __init__(self, app, max_content_length=1024*1024):  # 1MB default
        super().__init__(app)
        self.max_content_length = max_content_length
        logger.info(f"Request size limit middleware initialized: {max_content_length} bytes")

    async def dispatch(self, request: Request, call_next):
        content_length = request.headers.get('content-length')
        if content_length:
            # Fix: a malformed (non-numeric) Content-Length header made int()
            # raise ValueError, turning a bad request into a 500.
            try:
                declared_size = int(content_length)
            except ValueError:
                logger.warning(f"Invalid Content-Length header: {content_length!r}")
                return JSONResponse(
                    status_code=400,
                    content={"detail": "Invalid Content-Length header"}
                )
            if declared_size > self.max_content_length:
                logger.warning(f"Request too large: {content_length} bytes")
                return JSONResponse(
                    status_code=413,
                    content={"detail": "Request too large"}
                )
        return await call_next(request)
|
src/utils/summarizer.py
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import re
|
| 3 |
+
from typing import Optional, List, Dict, Any
|
| 4 |
+
|
| 5 |
+
logger = logging.getLogger(__name__)
|
| 6 |
+
|
| 7 |
+
class LocalSummarizer:
|
| 8 |
+
"""
|
| 9 |
+
Singleton-style wrapper for local LLM summarization.
|
| 10 |
+
Enhances extraction using robust heuristic rules and LLM generation with retry logic.
|
| 11 |
+
"""
|
| 12 |
+
_tokenizer = None
|
| 13 |
+
_model = None
|
| 14 |
+
_model_name = "google/flan-t5-small"
|
| 15 |
+
|
| 16 |
+
    @classmethod
    def _load_model(cls):
        """Lazily load the tokenizer and seq2seq model on first use.

        On failure, cls._model is set to False (not None) so later calls
        can distinguish "load failed" from "not attempted yet" and avoid
        retrying the download on every request.
        """
        if cls._model is None:
            try:
                from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
                import transformers
                logger.info(f"⏳ Loading summarization model ({cls._model_name})...")

                # Temporarily silence transformers' own progress/warning output.
                old_verbosity = transformers.logging.get_verbosity()
                transformers.logging.set_verbosity_error()

                cls._tokenizer = AutoTokenizer.from_pretrained(cls._model_name)
                cls._model = AutoModelForSeq2SeqLM.from_pretrained(cls._model_name)

                transformers.logging.set_verbosity(old_verbosity)
                logger.info("✅ Summarization model loaded successfully")
            except Exception as e:
                logger.error(f"❌ Failed to load summarization model: {e}")
                cls._model = False  # Mark as failed
|
| 36 |
+
|
| 37 |
+
@staticmethod
|
| 38 |
+
def _strip_yaml_frontmatter(text: str) -> str:
|
| 39 |
+
"""Strip the YAML frontmatter enclosed in ---"""
|
| 40 |
+
return re.sub(r'^---\s*\n.*?\n---\s*\n', '', text, flags=re.MULTILINE | re.DOTALL)
|
| 41 |
+
|
| 42 |
+
    @staticmethod
    def _extract_candidates(text: str) -> List[str]:
        """Collect candidate description snippets from a model-card body.

        Four extraction rules are applied in order; every hit is kept so
        the caller can score the candidates and pick the best one.
        """
        candidates = []

        # 1. Section Headers (support "1. Introduction"); group 2 captures the
        #    section body up to the next heading or end of text.
        heading_matches = re.finditer(r'^#+\s*(?:\d+[\.\)]?\s*)?(Description|Model [dD]escription|Model Overview|Overview|Introduction|Summary|モデル概要|Model Details)[^\n]*\n(.*?)(?=\n#+\s|\Z)', text, flags=re.MULTILINE | re.DOTALL)
        for match in heading_matches:
            if match.group(2).strip():
                candidates.append(match.group(2).strip())

        # 2. Inline "Description:" / "Overview:" labels, captured up to the
        #    next blank line.
        inline_matches = re.finditer(r'(?:Description:|Overview:|### Description:)\s*(.*?)(?=\n\n|\Z)', text, flags=re.DOTALL | re.IGNORECASE)
        for match in inline_matches:
            if match.group(1).strip():
                candidates.append(match.group(1).strip())

        # 3. Auto-generated fine-tuned leading sentences
        tuned_matches = re.finditer(r'^(?:The .*model is a .*|This model is a fine-tuned version of.*|This is a fine-tuned.*)', text, flags=re.MULTILINE | re.IGNORECASE)
        for match in tuned_matches:
            candidates.append(match.group(0).strip())

        # 4. Fallback: first meaningful paragraph.
        # Strip HTML tags first (fallback rule only) so tag-wrapped prose is
        # still considered.
        html_stripped = re.sub(r'<[^>]+>', '', text)
        paragraphs = re.split(r'\n\s*\n', html_stripped)
        for p in paragraphs:
            p = p.strip()
            if not p:
                continue
            if p.startswith('#'):
                continue
            # Skip heavy markdown like links/images/badges and github alerts
            if p.startswith('[!') or p.startswith('<a href') or p.startswith('> [!'):
                continue
            # If a paragraph has many links (like a table of contents / link directory)
            if p.count('](') > 3 or p.count('http') > 3:
                continue
            if len(p) > 50:
                candidates.append(p)
                break

        return candidates
|
| 84 |
+
|
| 85 |
+
@staticmethod
|
| 86 |
+
def _score_candidate(text: str) -> float:
|
| 87 |
+
score = 0.0
|
| 88 |
+
text_lower = text.lower()
|
| 89 |
+
|
| 90 |
+
# Length score (sweet spot between 100 and 500 chars)
|
| 91 |
+
if 50 < len(text) < 1000:
|
| 92 |
+
score += 10.0
|
| 93 |
+
|
| 94 |
+
# Reward definitional patterns
|
| 95 |
+
if "is a" in text_lower or "fine-tuned version of" in text_lower or "trained on" in text_lower or "designed for" in text_lower:
|
| 96 |
+
score += 20.0
|
| 97 |
+
|
| 98 |
+
# Penalize bad patterns
|
| 99 |
+
if "leaderboard" in text_lower or "benchmark" in text_lower or "results" in text_lower:
|
| 100 |
+
score -= 50.0
|
| 101 |
+
if "install" in text_lower or "how to run" in text_lower or "pip install" in text_lower or "read our guide" in text_lower:
|
| 102 |
+
score -= 30.0
|
| 103 |
+
|
| 104 |
+
# Penalize table/code-heavy paragraphs and bullet points
|
| 105 |
+
if text.count('|') > 5 or text.count('```') >= 1 or text.count('\n- ') > 2 or text.count('\n* ') > 2:
|
| 106 |
+
score -= 50.0
|
| 107 |
+
|
| 108 |
+
return score
|
| 109 |
+
|
| 110 |
+
    @staticmethod
    def _clean_text(text: str) -> str:
        """Reduce a markdown/HTML model-card fragment to plain prose.

        Applies, in order: HTML stripping (best-effort via BeautifulSoup),
        markdown image/link/code/table removal, boilerplate line filtering,
        and whitespace collapsing.
        """
        # Remove HTML; failures fall through and leave the text unchanged.
        from bs4 import BeautifulSoup
        try:
            soup = BeautifulSoup(text, "html.parser")
            for tag in soup(["style", "script"]):
                tag.decompose()
            text = soup.get_text(separator=' ')
        except Exception:
            pass

        # Remove markdown images
        text = re.sub(r'!\[.*?\]\([^)]+\)', '', text)
        # Convert links to just their visible text
        text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text)
        # Remove code blocks
        text = re.sub(r'```.*?```', '', text, flags=re.DOTALL)
        # Remove inline code
        text = re.sub(r'`[^`]*`', '', text)
        # Remove tables
        text = re.sub(r'\|.*?\|', '', text)
        text = re.sub(r'(?m)^[-:| ]+$', '', text) # table separators

        # Remove model-card-template boilerplate line by line
        lines = text.split('\n')
        clean_lines = []
        for line in lines:
            line_lower = line.lower()
            if 'generated automatically' in line_lower and 'model card' in line_lower:
                continue
            if 'completed by the model author' in line_lower:
                continue
            if 'model cards for model reporting' in line_lower:
                continue
            clean_lines.append(line)
        text = '\n'.join(clean_lines)

        # Collapse all whitespace runs (including newlines) to single spaces
        text = re.sub(r'\s+', ' ', text).strip()

        return text
|
| 152 |
+
|
| 153 |
+
    @classmethod
    def _generate(cls, prompt: str, max_output_chars: int) -> Optional[str]:
        """Run the seq2seq model on *prompt* and return a cleaned summary.

        Returns None when the model is unavailable (load failed) or when
        generation raises. Output longer than *max_output_chars* is
        truncated with a trailing ellipsis.
        """
        if cls._model is None:
            cls._load_model()
        # cls._model is False after a failed load; bail out in that case too.
        if not cls._model or not cls._tokenizer:
            return None

        try:
            inputs = cls._tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
            # Deterministic beam search; repetition penalty curbs the small
            # model's tendency to loop.
            generate_kwargs = {
                "max_length": 128, # Increased by ~30% from 64
                "min_length": 15, # Avoid single word outputs
                "do_sample": False,
                "num_beams": 4,
                "early_stopping": True,
                "repetition_penalty": 2.0
            }
            summary_ids = cls._model.generate(inputs["input_ids"], **generate_kwargs)
            summary = cls._tokenizer.decode(summary_ids[0], skip_special_tokens=True)

            summary = summary.strip()

            # Remove "Output:" prefix if present
            if summary.lower().startswith("output:"):
                summary = re.sub(r'^Output:\s*', '', summary, flags=re.IGNORECASE)

            # Enforce the caller's character budget (ellipsis included).
            if len(summary) > max_output_chars:
                return summary[:max_output_chars-3] + "..."
            return summary
        except Exception as e:
            logger.warning(f"⚠️ Generation failed: {e}")
            return None
|
| 185 |
+
|
| 186 |
+
@staticmethod
|
| 187 |
+
def _is_valid_summary(summary: str, model_id: str) -> bool:
|
| 188 |
+
if not summary or len(summary) < 15:
|
| 189 |
+
return False
|
| 190 |
+
|
| 191 |
+
summary_lower = summary.lower()
|
| 192 |
+
model_name = model_id.split('/')[-1].lower()
|
| 193 |
+
|
| 194 |
+
if summary_lower == model_name or summary_lower == f"{model_name} model":
|
| 195 |
+
return False
|
| 196 |
+
|
| 197 |
+
# Check for markdown/html artifacts
|
| 198 |
+
if '#' in summary or '<' in summary or '>' in summary or '*' in summary:
|
| 199 |
+
return False
|
| 200 |
+
|
| 201 |
+
# Check for instruction-like text
|
| 202 |
+
if summary_lower.startswith("to install") or summary_lower.startswith("how to") or "pip install" in summary_lower:
|
| 203 |
+
return False
|
| 204 |
+
|
| 205 |
+
# Refuse literally copying bullet points (e.g. from table)
|
| 206 |
+
if "- type:" in summary_lower or "number of parameters:" in summary_lower:
|
| 207 |
+
return False
|
| 208 |
+
|
| 209 |
+
return True
|
| 210 |
+
|
| 211 |
+
@classmethod
def summarize(cls, text: str, max_output_chars: int = 332, model_id: str = "") -> Optional[str]:
    """
    Robustly extract and summarize model description.

    Pipeline: strip YAML frontmatter, pick the best-scoring candidate
    block, clean it, attempt two LLM summarization passes, and finally
    fall back to the leading sentences of the cleaned text.
    """
    if not text or not text.strip():
        return None

    # Strip YAML safely, then gather candidate description blocks.
    body = cls._strip_yaml_frontmatter(text)
    candidates = cls._extract_candidates(body) or [body[:1000]]

    # Score candidates and keep the best one, cleaned aggressively.
    best_candidate = max(candidates, key=cls._score_candidate)
    cleaned_text = cls._clean_text(best_candidate)
    if not cleaned_text.strip():
        return None

    # Feed only the leading sentences to the small model so training
    # details at the end of the paragraph do not dominate the summary.
    leading = " ".join(re.split(r'(?<=[.!?])\s+', cleaned_text)[:3])

    first_try = cls._generate(
        f"In one sentence, explain what this AI model is designed to do based on this description:\n\n{leading}",
        max_output_chars,
    )
    if first_try and cls._is_valid_summary(first_try, model_id):
        return first_try

    # Retry with a stricter prompt over the full cleaned text.
    logger.info("⚠️ First summary invalid, retrying with stricter prompt.")
    second_try = cls._generate(
        f"Summarize the main purpose of this AI model in one complete sentence:\n\n{cleaned_text}",
        max_output_chars,
    )
    if second_try and cls._is_valid_summary(second_try, model_id):
        return second_try

    # Last resort: the first 1-2 sentences of the cleaned extracted text.
    logger.info("⚠️ Both LLM summaries invalid, falling back to cleaned extracted text.")
    fallback = " ".join(re.split(r'(?<=[.!?])\s+', cleaned_text)[:2])
    if len(fallback) > max_output_chars:
        return fallback[:max_output_chars - 3] + "..."
    return fallback
|
src/utils/validation.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CycloneDX 1.6 Schema Validation for AIBOM Generator.
|
| 3 |
+
|
| 4 |
+
This module provides validation of generated AIBOMs against the official
|
| 5 |
+
CycloneDX 1.6 JSON schema to ensure compliance and interoperability.
|
| 6 |
+
"""
|
| 7 |
+
import json
|
| 8 |
+
import logging
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 11 |
+
|
| 12 |
+
# requests is a core dependency of this project; guard this import if that ever changes
|
| 13 |
+
import requests
|
| 14 |
+
import jsonschema
|
| 15 |
+
from jsonschema import Draft7Validator, ValidationError
|
| 16 |
+
from referencing import Registry, Resource
|
| 17 |
+
|
| 18 |
+
# Module-level logger
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
# CycloneDX schema configuration
|
| 22 |
+
CYCLONEDX_1_6_SCHEMA_URL = "https://raw.githubusercontent.com/CycloneDX/specification/master/schema/bom-1.6.schema.json"
|
| 23 |
+
# Correct path relative to this file: src/utils/../schemas -> src/schemas
|
| 24 |
+
SCHEMA_CACHE_DIR = Path(__file__).parent.parent / "schemas"
|
| 25 |
+
SCHEMA_CACHE_FILE = SCHEMA_CACHE_DIR / "bom-1.6.schema.json"
|
| 26 |
+
|
| 27 |
+
# Global schema cache
|
| 28 |
+
_cached_schema: Optional[Dict[str, Any]] = None
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _ensure_cache_dir() -> None:
    """Create the schema cache directory (and any parents) if missing."""
    SCHEMA_CACHE_DIR.mkdir(parents=True, exist_ok=True)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def _load_schema_from_cache() -> Optional[Dict[str, Any]]:
    """Return the cached CycloneDX schema, or None if absent or unreadable."""
    if not SCHEMA_CACHE_FILE.exists():
        return None
    try:
        with open(SCHEMA_CACHE_FILE, "r", encoding="utf-8") as fh:
            schema = json.load(fh)
        logger.debug("Loaded CycloneDX 1.6 schema from cache")
        return schema
    except (json.JSONDecodeError, IOError) as e:
        logger.warning("Failed to load cached schema: %s", e)
        return None
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _download_schema() -> Optional[Dict[str, Any]]:
    """Download the CycloneDX 1.6 schema from the official repository.

    On success the schema is written to SCHEMA_CACHE_FILE on a best-effort
    basis: a failure to cache is logged but does NOT discard the schema we
    already hold in memory (previously a cache-write error returned None
    even though the download succeeded).

    Returns:
        The schema dictionary, or None if the download or parsing failed.
    """
    try:
        logger.info("Downloading CycloneDX 1.6 schema from %s", CYCLONEDX_1_6_SCHEMA_URL)
        response = requests.get(CYCLONEDX_1_6_SCHEMA_URL, timeout=30)
        response.raise_for_status()
        schema = response.json()
    except requests.RequestException as e:
        logger.error("Failed to download CycloneDX schema: %s", e)
        return None
    except json.JSONDecodeError as e:
        logger.error("Failed to parse downloaded schema: %s", e)
        return None

    # Best-effort local caching: never let a disk error invalidate a
    # successfully downloaded schema.
    try:
        _ensure_cache_dir()
        with open(SCHEMA_CACHE_FILE, "w", encoding="utf-8") as f:
            json.dump(schema, f, indent=2)
        logger.info("CycloneDX 1.6 schema downloaded and cached")
    except OSError as e:
        logger.warning("Failed to cache CycloneDX schema: %s", e)

    return schema
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def load_schema(force_download: bool = False) -> Optional[Dict[str, Any]]:
    """
    Load the CycloneDX 1.6 JSON schema.

    Resolution order: in-memory cache, then the on-disk cache, then a
    fresh download from the official repository.

    Args:
        force_download: If True, bypass both caches and download anew.

    Returns:
        The schema dictionary, or None if every source failed.
    """
    global _cached_schema

    if not force_download:
        # Fast path: already loaded in this process.
        if _cached_schema is not None:
            return _cached_schema
        # Next: the on-disk cache.
        cached = _load_schema_from_cache()
        if cached:
            _cached_schema = cached
            return cached

    # Last resort (or forced): fetch from the network.
    fetched = _download_schema()
    if fetched:
        _cached_schema = fetched
    return fetched
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def _format_validation_error(error: ValidationError) -> str:
    """Render a jsonschema ValidationError as '[path] message'."""
    if error.absolute_path:
        location = " -> ".join(str(part) for part in error.absolute_path)
    else:
        location = "root"
    return f"[{location}] {error.message}"
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def validate_aibom(aibom: Dict[str, Any], strict: bool = False) -> Tuple[bool, List[str]]:
    """
    Validate an AIBOM against the CycloneDX 1.6 schema.

    Args:
        aibom: The AIBOM dictionary to validate.
        strict: Reserved for strict-mode validation.
            NOTE(review): currently unused — all errors are always collected.

    Returns:
        Tuple of (is_valid, list of error messages).
        (True, []) when valid.
        (True, ["Schema unavailable"]) when the schema could not be loaded
        (validation is skipped, not failed).
        (False, [error1, error2, ...]) when invalid.
    """
    schema = load_schema()

    if schema is None:
        logger.warning("Could not load CycloneDX schema - skipping validation")
        return True, ["Schema unavailable"]

    # The CycloneDX schema $refs the SPDX license-id schema; register a
    # local copy so validation needs no network access.
    registry = Registry()
    spdx_path = SCHEMA_CACHE_DIR / "spdx.schema.json"
    if spdx_path.exists():
        try:
            with open(spdx_path, "r", encoding="utf-8") as f:
                spdx_schema = json.load(f)
            resource = Resource.from_contents(spdx_schema)
            registry = registry.with_resource(uri="spdx.schema.json", resource=resource)
        except Exception as e:
            logger.warning("Failed to load SPDX schema for validation: %s", e)

    validator = Draft7Validator(schema, registry=registry)
    # Sort on the *stringified* absolute path: raw error paths mix str
    # object keys with int array indices, and comparing those element
    # types directly raises TypeError. Stringifying also makes the sort
    # consistent with _format_validation_error, which uses absolute_path.
    errors = sorted(
        validator.iter_errors(aibom),
        key=lambda e: [str(p) for p in e.absolute_path],
    )

    if not errors:
        return True, []

    return False, [_format_validation_error(e) for e in errors]
|
| 150 |
+
|
| 151 |
+
def get_validation_summary(aibom: Dict[str, Any]) -> Dict[str, Any]:
    """Get a summary of schema validation results."""
    is_valid, errors = validate_aibom(aibom)
    summary = {
        "valid": is_valid,
        "error_count": len(errors),
        # Cap the reported errors so the summary stays compact.
        "errors": [] if is_valid else errors[:10],
    }
    return summary
|