Use cuda-enabled base image
Since I can't build CUDA images locally (no NVIDIA GPU available), make use of an existing one instead. That also means downgrading Python to Debian's 3.10.
parent 56fa2eca25
commit 3246469be2
2 changed files with 4 additions and 6 deletions
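For context, a minimal sketch of the multi-stage layout this change produces, assuming the usual copy-from-staging pattern. The image names, ARG values, and version pins come from the hunks below; the WORKDIR and COPY lines are illustrative and not part of this commit.

# Build/dependency stage stays on the plain Python image, now 3.10 to match the runtime image.
ARG PYTHON_VERSION=3.10.0
FROM python:${PYTHON_VERSION} as staging
# ... the project and its poetry-managed dependencies are prepared here (see the diff below) ...

# Runtime stage now starts from a prebuilt CUDA-enabled PyTorch image instead of the staging stage.
FROM ghcr.io/pytorch/pytorch-nightly:668af07-cu11.8.0 as worker
ARG APP_PATH=/verbanote
WORKDIR ${APP_PATH}
# Hypothetical copy of the prepared application from the staging stage; not shown in this commit.
COPY --from=staging /verbanote ${APP_PATH}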
Dockerfile
@@ -1,4 +1,4 @@
-ARG PYTHON_VERSION=3.11.0
+ARG PYTHON_VERSION=3.10.0
 ARG POETRY_VERSION=1.5.0

 FROM python:${PYTHON_VERSION} as staging
@@ -31,7 +31,8 @@ RUN poetry install

 # --------------

-FROM staging as worker
+FROM ghcr.io/pytorch/pytorch-nightly:668af07-cu11.8.0 as worker
+
 ARG APP_NAME=verbanote
 ARG APP_PATH=/verbanote
 ARG VERBANOTE_OUTPUT_PATH=/out
pyproject.toml
@@ -7,18 +7,15 @@ readme = "README.md"
 # packages = [{ include = "verbanote" }]

 [tool.poetry.dependencies]
-python = "^3.11"
+python = "^3.10"
 requests = "^2.31.0"
 static-ffmpeg = "^2.5"
 runpod = "^1.1.3"
 pydub = "^0.25.1"
 #light-the-torch = "^0.7.5"
-light-the-torch = "^0.7.5"
-hmmlearn = {git = "https://github.com/hmmlearn/hmmlearn.git"}
-pyannote-audio = {git = "https://github.com/pyannote/pyannote-audio.git", rev = "develop"}
 #openai-whisper = { git = "https://github.com/openai/whisper.git" }
 #hmmlearn = {git = "https://github.com/hmmlearn/hmmlearn.git"}
 #pyannote-audio = {git = "https://github.com/pyannote/pyannote-audio.git", rev = "develop"}

 [build-system]
 requires = ["poetry-core"]