verbanote-server/pyproject.toml
Marty Oehme 3246469be2
Use CUDA-enabled base image
Since I can't build CUDA images locally (without an NVIDIA GPU), we make use
of an existing one. That also means downgrading Python to Debian's version,
3.10.
2023-08-22 20:35:25 +02:00


[tool.poetry]
name = "verbanote"
version = "0.1.0"
description = ""
authors = ["Marty Oehme <marty.oehme@gmail.com>"]
readme = "README.md"
# packages = [{ include = "verbanote" }]

[tool.poetry.dependencies]
python = "^3.10"
requests = "^2.31.0"
static-ffmpeg = "^2.5"
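# runpod: Python SDK for RunPod's serverless GPU platform (assumed deployment target for this worker)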
runpod = "^1.1.3"
pydub = "^0.25.1"
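# light-the-torch installs a torch build matching the CUDA setup it detects at install time
# (assumption: used here so torch stays in sync with the CUDA-enabled base image)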
light-the-torch = "^0.7.5"
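# the next two are pinned to git rather than a PyPI release (pyannote-audio to its develop
# branch), presumably for fixes not yet in a tagged release (assumption)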
hmmlearn = {git = "https://github.com/hmmlearn/hmmlearn.git"}
pyannote-audio = {git = "https://github.com/pyannote/pyannote-audio.git", rev = "develop"}
#openai-whisper = { git = "https://github.com/openai/whisper.git" }

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"