Commit 5c4d4470 authored by Artem Baranovskyi

Fixed stable Ollama model loading and chat functionality.

Please fill in your own credentials in .env.
Create a HuggingFace token and add it to .env.
Import the JSON config for the Chatflow.
parent d08ac822
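For reference, the .env file needs at least the two variables the compose file below reads, HF_TOKEN and GIN_MODE. A minimal sketch (the token value is a placeholder, and any further credentials for the flowise-db service depend on your own setup):

HF_TOKEN=hf_your_token_here
GIN_MODE=release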
@@ -15,6 +15,8 @@ services:
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - flowise_network
  flowise:
    image: flowiseai/flowise:latest
@@ -48,6 +50,8 @@ services:
      - ollama
      - flowise-db
    entrypoint: /bin/sh -c "sleep 3; flowise start"
    networks:
      - flowise_network
  ollama:
    build:
@@ -61,7 +65,13 @@ services:
      - "11434:11434"
    environment:
      HF_TOKEN: ${HF_TOKEN}
      GIN_MODE: ${GIN_MODE}
    networks:
      - flowise_network
volumes:
  flowise-db-data:
  ollama_data:
networks:
  flowise_network:
\ No newline at end of file
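With all three services attached to flowise_network, the containers can reach each other by service name, e.g. Flowise can call the model server at http://ollama:11434. One way to verify the wiring after docker compose up (Compose prefixes the network name with the project name, so the exact name may differ):

docker network inspect <project>_flowise_network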
# Use the official Ollama image as the base image
FROM ollama/ollama
# Accept API requests from any origin
ENV OLLAMA_ORIGINS=*
# Listen on all interfaces so other containers can reach the server
ENV OLLAMA_HOST=0.0.0.0
# Install curl for health checks and debugging
RUN apt-get update && apt-get install -y curl
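The image can also be built and smoke-tested outside Compose; the tag and build-context path here are arbitrary examples, not part of the commit:

docker build -t ollama-local ./ollama
docker run --rm -p 11434:11434 ollama-local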
 #!/bin/bash
-# Pull the Ollama model
-ollama pull llama3.2:1b
-# Run the Ollama model
-ollama run llama3.2:1b &
-# Serve the Ollama model
-ollama serve
+MODEL_NAME="llama3.2:1b"
+if ! ollama list | grep -q "$MODEL_NAME"; then
+    echo "Model is not found. Loading..."
+    ollama pull "$MODEL_NAME"
+else
+    echo "Model is already loaded."
+fi
+ollama run "$MODEL_NAME" &
+ollama serve
\ No newline at end of file
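Note that ollama list and ollama pull talk to the Ollama server, so even the new script can race the ollama serve that only starts last. A sketch of a start-order-safe variant, assuming curl (installed in the Dockerfile above) and Ollama's /api/tags endpoint as a readiness probe:

#!/bin/bash
MODEL_NAME="llama3.2:1b"

# Start the server first so the CLI commands below can reach it
ollama serve &
SERVE_PID=$!

# Wait until the API answers before issuing CLI commands
until curl -s http://localhost:11434/api/tags > /dev/null; do
    sleep 1
done

# Pull the model only if it is not already present
if ! ollama list | grep -q "$MODEL_NAME"; then
    ollama pull "$MODEL_NAME"
fi

# Keep the server in the foreground as the container's main process
wait $SERVE_PID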