Update entrypoint.py

This commit is contained in:
cmdrunematti 2025-10-20 13:13:30 +00:00
parent 666680babb
commit 4fec3f6596
1 changed file with 41 additions and 80 deletions

@@ -1,85 +1,46 @@
# main.py
import os
import uuid

import redis
import requests
from fastapi import FastAPI
from qdrant_client import QdrantClient
from qdrant_client.http import models as qmodels
from langchain_community.embeddings import HuggingFaceEmbeddings

import langchain
from langchain.tools import searxng, qdrant, llma_server  # tool modules used by SearchAgent below

app = FastAPI()
# Environment variables
SEARXNG_URL = os.environ["SEARXNG_URL"]
QDRANT_URL = os.environ["QDRANT_URL"]
LLAMA_SERVER_URL = os.environ["LLAMA_SERVER_URL"]
REDIS_URL = os.environ["REDIS_URL"]

# Connectors
qdrant_client = QdrantClient(url=QDRANT_URL)
r = redis.Redis.from_url(REDIS_URL)
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
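
# --- Not in the original commit: a minimal bootstrap sketch. ---
# /ask upserts into a "docs" collection and assumes it already exists; the
# 384-dim size matches all-MiniLM-L6-v2 embeddings. Collection name and
# distance metric are assumptions, not taken from the commit.
if not any(c.name == "docs" for c in qdrant_client.get_collections().collections):
    qdrant_client.create_collection(
        collection_name="docs",
        vectors_config=qmodels.VectorParams(size=384, distance=qmodels.Distance.COSINE),
    )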
@app.get("/ask")
def ask(query: str):
# 1. Search via SearxNG
resp = requests.get(f"{SEARXNG_URL}/search", params={"q": query, "format": "json"})
snippets = [r["title"] + " " + r["content"] for r in resp.json()["results"][:3]]
context = " ".join(snippets)
# 2. Embed + store in Qdrant
vectors = embeddings.embed_documents(snippets)
points = [
qmodels.PointStruct(
id=str(uuid.uuid4()), # unique ID for each snippet
vector=vec, # the embedding vector
payload={"text": text} # optional metadata
)
for text, vec in zip(snippets, vectors)
]
qdrant.upsert(collection_name="docs", points=points)

    # 3. Call llama-server
    llama_http = requests.post(
        f"{LLAMA_SERVER_URL}/completion",
        json={
            "prompt": f"Context:\n{context}\n\nQuestion: {query}\nAnswer:",
            "n_predict": 128,
            "temperature": 0.7,
            "stop": ["</s>"]
        }
    )
    llm_resp = llama_http.json()
    print("Llama raw response:", llm_resp)
    return {"answer": llm_resp.get("content", "")}
@app.get("/health")
def health():
results = {}
# Check Redis
try:
r = redis.Redis.from_url(REDIS_URL)
r.ping()
results["redis"] = "ok"
except Exception as e:
results["redis"] = f"error: {e}"
# Check Qdrant
try:
q = requests.get(f"{QDRANT_URL}/readyz")
results["qdrant"] = f"ok ({q.status_code})"
except Exception as e:
results["qdrant"] = f"error: {e}"
# Check SearxNG
try:
s = requests.get(f"{SEARXNG_URL}/search",
params={"q": "ping", "format": "json"},
headers={"X-Forwarded-For": "127.0.0.1"})
results["searxng"] = f"ok ({s.status_code})"
except Exception as e:
results["searxng"] = f"error: {e}"

# Define the LangChain agent
class SearchAgent(langchain.Agent):
    def __init__(self):
        super().__init__()
        self.searxng_tool = searxng.SearXNGTool(SEARXNG_URL=SEARXNG_URL)
        self.qdrant_tool = qdrant.QDrantTool(QDRANT_URL=QDRANT_URL)
        self.llma_server_tool = llma_server.LLaMAServerTool(LLAMA_SERVER_URL=LLAMA_SERVER_URL)

    async def search(self, query):
        # Send query to SearXNG API
        results = await self.searxng_tool.search(query)
        # Embed results with QDrant
        await self.qdrant_tool.embed(results)
        return results

    async def query(self, query):
        # Use LLaMA-Server to process query
        response = await self.llma_server_tool.query(query)
        return response

# Create the LangChain agent
agent = SearchAgent()

# Agent-backed API endpoints (registered on the same FastAPI app as /ask and /health)
@app.post("/search")
async def search(query: str):
    results = await agent.search(query)
    return results

@app.post("/query")
async def query(query: str):
    response = await agent.query(query)
    return response
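
# --- Not in the original commit: a minimal launch sketch. ---
# Assumes the module is run directly (e.g. as the container entrypoint);
# host and port are guesses, not taken from the commit.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)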