From d08ac822060413ab78f078ffbd5054fc403bf978 Mon Sep 17 00:00:00 2001
From: Artem Baranovskyi <artem.baranovsky1980@gmail.com>
Date: Sun, 2 Feb 2025 00:01:34 +0200
Subject: [PATCH] Initial commit. Added Docker compose with 3 containers -
 postgres, flowise, ollama. Works on fly. Just fill the .env creds.

---
 .env.Example                |  3 +++
 docker-compose.yml          | 11 +++++++----
 ollama-models/Dockerfile    | 24 ++++++++++++++++++++++++
 ollama-models/load_model.py | 14 ++++++++++++++
 ollama-models/start.sh      |  7 +++++++
 5 files changed, 55 insertions(+), 4 deletions(-)
 create mode 100644 ollama-models/Dockerfile
 create mode 100644 ollama-models/load_model.py
 create mode 100644 ollama-models/start.sh

diff --git a/.env.Example b/.env.Example
index 7af3663..ea1554d 100644
--- a/.env.Example
+++ b/.env.Example
@@ -1,4 +1,6 @@
+HF_TOKEN=
 LANGCHAIN_API_KEY=
+
 PORT=3000
 FLOWISE_USERNAME=
 FLOWISE_PASSWORD=
@@ -15,3 +17,4 @@ DATABASE_NAME=flowise
 DATABASE_USER=
 DATABASE_PASSWORD=
 PGSSLMODE=require
+
diff --git a/docker-compose.yml b/docker-compose.yml
index 246976b..b951c1e 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -44,21 +44,24 @@ services:
       DATABASE_NAME: ${DATABASE_NAME}
       DATABASE_USER: ${DATABASE_USER}
       DATABASE_PASSWORD: ${DATABASE_PASSWORD}
-#    restart: on-failure:5
     depends_on:
       - ollama
       - flowise-db
     entrypoint: /bin/sh -c "sleep 3; flowise start"
 
   ollama:
-    image: ollama/ollama
+    build:
+      context: ./ollama-models
+      dockerfile: Dockerfile
     container_name: ollama
-    restart: unless-stopped
+#    restart: unless-stopped
     volumes:
       - ollama_data:/root/.ollama
     ports:
       - "11434:11434"
+    environment:
+      HF_TOKEN: ${HF_TOKEN}
 
 volumes:
   flowise-db-data:
-  ollama_data:
\ No newline at end of file
+  ollama_data:
diff --git a/ollama-models/Dockerfile b/ollama-models/Dockerfile
new file mode 100644
index 0000000..eef30ce
--- /dev/null
+++ b/ollama-models/Dockerfile
@@ -0,0 +1,24 @@
+# Use the official Ollama image as the base image
+FROM ollama/ollama
+
+# Install necessary dependencies
+RUN apt-get update && apt-get install -y curl
+
+# Set the working directory in the container
+RUN mkdir /app
+
+# Set the working directory
+WORKDIR /app
+
+# Copy the script to download the model
+COPY load_model.py /app/load_model.py
+
+# Set Hugging Face token for authentication
+ARG HF_TOKEN
+ENV HF_TOKEN=${HF_TOKEN}
+
+COPY start.sh /app/start.sh
+RUN chmod +x /app/start.sh
+
+# Use the start.sh script as the entrypoint
+ENTRYPOINT ["/app/start.sh"]
\ No newline at end of file
diff --git a/ollama-models/load_model.py b/ollama-models/load_model.py
new file mode 100644
index 0000000..3436565
--- /dev/null
+++ b/ollama-models/load_model.py
@@ -0,0 +1,14 @@
+import os
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+# Set the Hugging Face token for authentication
+hf_token = os.environ.get("HF_TOKEN")
+if hf_token is None:
+    raise ValueError("Hugging Face token is not set")
+
+model_name = "meta-llama/Llama-3.2-1B"
+
+model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
+tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
+
+print("Model loaded successfully!")
diff --git a/ollama-models/start.sh b/ollama-models/start.sh
new file mode 100644
index 0000000..7388fbf
--- /dev/null
+++ b/ollama-models/start.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Serve the Ollama model in the background
+ollama serve &
+# Wait for the server to become ready
+sleep 5
+# Pull the Ollama model (requires a running server)
+ollama pull llama3.2:1b
-- 
GitLab