Delete entrypoint.sh
Browse files — entrypoint.sh: +0 −38
entrypoint.sh
DELETED
|
@@ -1,38 +0,0 @@
|
|
| 1 |
-
#!/bin/bash
# Entrypoint for the AIBOM Generator container.
#
# Dispatches on the first argument:
#   server    - start the FastAPI/uvicorn API server (recommended for Hugging Face Spaces)
#   worker    - start the background worker process
#   inference - start the inference model server
#   <other>   - forward all arguments to the CLI
#   (no args) - default: start the API server (web UI mode)
#
# Environment:
#   AIBOM_INFERENCE_URL - inference endpoint; defaults to the internal service URL
#   PORT                - listen port (defaults: 7860 for API server, 8000 for inference)

# Fail fast: abort on errors, unset variables, and mid-pipeline failures.
set -euo pipefail

# Default inference URL for internal inference model service
DEFAULT_INFERENCE_URL="http://localhost:8000/extract"
# Quote the RHS so a caller-supplied URL containing spaces cannot word-split.
export AIBOM_INFERENCE_URL="${AIBOM_INFERENCE_URL:-$DEFAULT_INFERENCE_URL}"

echo "Using AIBOM_INFERENCE_URL: $AIBOM_INFERENCE_URL"

# Check if command-line arguments are provided.
# "${1:-}" (not bare "$1") keeps this safe under `set -u` when no args are given.
if [ -n "${1:-}" ]; then
  case "$1" in
    server)
      # Start the API server explicitly (recommended for Hugging Face Spaces)
      echo "Starting AIBOM Generator API server..."
      exec uvicorn src.aibom_generator.api:app --host 0.0.0.0 --port "${PORT:-7860}"
      ;;
    worker)
      # Start the background worker
      echo "Starting AIBOM Generator background worker..."
      exec python -m src.aibom_generator.worker
      ;;
    inference)
      # Start the inference model server
      echo "Starting AIBOM Generator inference model server..."
      exec python -m src.aibom_generator.inference_model --host 0.0.0.0 --port "${PORT:-8000}"
      ;;
    *)
      # Run as CLI with provided arguments
      echo "Running AIBOM Generator CLI..."
      exec python -m src.aibom_generator.cli "$@"
      ;;
  esac
else
  # Default behavior (if no arguments): start API server (web UI mode)
  echo "Starting AIBOM Generator API server (web UI)..."
  exec uvicorn src.aibom_generator.api:app --host 0.0.0.0 --port "${PORT:-7860}"
fi
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|