Commit d08ac822 authored by Artem Baranovskyi's avatar Artem Baranovskyi
Browse files

Initial commit. Added Docker Compose with 3 containers - PostgreSQL, Flowise, Ollama.

Works out of the box. Just fill in the credentials in .env.
parent bef46286
No related merge requests found
Showing with 55 additions and 4 deletions
+55 -4
HF_TOKEN=
LANGCHAIN_API_KEY=
PORT=3000
FLOWISE_USERNAME=
FLOWISE_PASSWORD=
......@@ -15,3 +17,4 @@ DATABASE_NAME=flowise
DATABASE_USER=
DATABASE_PASSWORD=
PGSSLMODE=require
......@@ -44,21 +44,24 @@ services:
DATABASE_NAME: ${DATABASE_NAME}
DATABASE_USER: ${DATABASE_USER}
DATABASE_PASSWORD: ${DATABASE_PASSWORD}
# restart: on-failure:5
depends_on:
- ollama
- flowise-db
entrypoint: /bin/sh -c "sleep 3; flowise start"
ollama:
# NOTE(review): both `image:` and `build:` are set — Compose will build
# ./ollama-models/Dockerfile and tag the result as `ollama/ollama`,
# shadowing the upstream image name. Consider a project-specific tag.
image: ollama/ollama
build:
context: ./ollama-models
dockerfile: Dockerfile
container_name: ollama
restart: unless-stopped
# stale duplicate of the restart policy above, kept commented out
# restart: unless-stopped
volumes:
# Persist downloaded models across container restarts.
- ollama_data:/root/.ollama
ports:
# Ollama HTTP API port.
- "11434:11434"
environment:
# Runtime token read by load_model.py inside the container; the Dockerfile
# also declares an HF_TOKEN build ARG — pass it via `build.args` if the
# token is needed at build time as well.
HF_TOKEN: ${HF_TOKEN}
volumes:
flowise-db-data:
ollama_data:
\ No newline at end of file
ollama_data:
# Base image: the official Ollama runtime (the original comment claimed a
# Python base, which was wrong — this image ships the ollama binary).
# NOTE(review): consider pinning a tag/digest instead of the implicit
# :latest for reproducible builds.
FROM ollama/ollama

# curl is used by start.sh to probe server readiness. Skip recommended
# packages and drop the apt lists in the same layer so they never persist
# in the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends curl \
    && rm -rf /var/lib/apt/lists/*

# WORKDIR creates /app if it does not exist; no separate mkdir is needed.
WORKDIR /app

# Helper script that downloads the Hugging Face model.
COPY load_model.py /app/load_model.py

# Hugging Face token for authenticated model downloads.
# WARNING: ENV bakes the token into the image layers and `docker history`.
# Prefer a BuildKit secret mount (RUN --mount=type=secret,...) or supplying
# HF_TOKEN only at run time via the compose `environment:` section.
ARG HF_TOKEN
ENV HF_TOKEN=${HF_TOKEN}

# Entrypoint script: starts the Ollama server and pulls the model.
COPY start.sh /app/start.sh
RUN chmod +x /app/start.sh
ENTRYPOINT ["/app/start.sh"]
\ No newline at end of file
"""Download the Llama model from Hugging Face using the HF_TOKEN env var.

Exits with ValueError when no token is configured; the target model is
gated, so an authenticated download is required.
"""
import os
from transformers import AutoModelForCausalLM, AutoTokenizer

# Token is injected by docker-compose / the Dockerfile ENV.
hf_token = os.environ.get("HF_TOKEN")
if hf_token is None:
    raise ValueError("Hugging Face token is not set")

model_name = "meta-llama/Llama-3.2-1B"
# BUG FIX: the token was previously validated but never used, so the
# download of this gated model would fail with 403. Forward it explicitly.
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
print("Model loaded successfully!")
#!/bin/bash
# Container entrypoint: bring up the Ollama server, then warm the model.
set -euo pipefail

# BUG FIX: the original ran `ollama pull` / `ollama run` *before*
# `ollama serve`, but both are API clients that need a running server.
# Start the server first, in the background.
ollama serve &
SERVE_PID=$!

# Wait until the HTTP API answers; the server needs a moment to bind
# its port (curl is installed by the Dockerfile).
until curl -sf http://localhost:11434/ > /dev/null 2>&1; do
    sleep 1
done

# Pull the model so it is available for requests.
ollama pull llama3.2:1b

# Keep the server in the foreground so the container stays alive and
# `docker stop` signals reach it.
wait "$SERVE_PID"
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment