-
Notifications
You must be signed in to change notification settings - Fork 29
/
Copy path.env
51 lines (43 loc) · 2.62 KB
/
.env
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
# .env file
# Docker image configuration
IMAGE_TAG=main # Docker image tag to use for building the Docker image
PORT=3000 # Port to use for running the web interface
# Database configuration
POSTGRES_PORT=5432 # Port to use for PostgreSQL database
POSTGRES_USER=root # Username for PostgreSQL database
POSTGRES_PASSWORD=mysecretpassword # Password for PostgreSQL database
POSTGRES_DB=local # Database name
DATABASE_URL=postgres://root:mysecretpassword@db:5432/local # Database URL for connection to PostgreSQL database with credentials from above
# Application configuration
ADMIN_USERNAME=admin # Username for admin user in web interface
ADMIN_PASSWORD=password # Password for admin user in web interface
# AI configuration
# Default model to use for transcription; can be set to any OpenAI model or Ollama model
# For Ollama connections, enter the model name and version number, e.g. llama3.2:latest
AI_MODEL="gpt-3.5-turbo"
# Leave blank to use default (OpenAI API), otherwise set to the base URL of your OpenAI API compatible server
# For ollama connections, enter the IP of the Ollama server, and then the port it is running on.
# Include the /v1/ or /api/v1/ path if needed (Open WebUI uses /api/ and Ollama uses /v1/)
# Example: http://192.168.1.5:11434 or http://host.docker.internal:11434
# NOTE: host.docker.internal is only available on Windows and macOS; use the IP address of the host machine on Linux
# NOTE: localhost and 127.0.0.1 will not work, as they refer to the container itself, not the host machine
OLLAMA_BASE_URL=""
# API Keys
# NOTE:
# If using Ollama, you can leave these blank or set to a dummy value
# If using OpenAI, you must set these to your API keys
# If using a custom API compatible server, you must set these to your API keys
OPENAI_API_KEY="" # Needed for retrieving models from OpenAI, for Ollama connections, this can be left blank or set to a dummy value
HF_API_KEY="" # Needed for retrieving models from HuggingFace for Diarization
# Diarization configuration
# Default Model to use for Diarization, can be set to any HuggingFace model that supports diarization
# NOTE: This model will be downloaded automatically if it is not already present in the models directory
# NOTE: You can use any model that supports diarization, but the default model is pyannote/speaker-diarization
# NOTE: You can find a list of models that support diarization here: https://huggingface.co/models?other=speaker-diarization
DIARIZATION_MODEL=pyannote/speaker-diarization
MODELS_DIR=/scriberr/models
WORK_DIR=/scriberr/temp
AUDIO_DIR=/scriberr/uploads
# Server configuration
BODY_SIZE_LIMIT=1G
HARDWARE_ACCEL=cpu # Set to 'gpu' if you have a Nvidia GPU