updates

llm_be/chat_backend/consumers.py | 418 (new file)
@@ -0,0 +1,418 @@
|
||||
import json
|
||||
import base64
|
||||
import logging
|
||||
import pandas as pd
|
||||
from datetime import datetime
|
||||
from django.utils import timezone
|
||||
from django.conf import settings
|
||||
from django.core.files.base import ContentFile
|
||||
from channels.generic.websocket import AsyncWebsocketConsumer
|
||||
from channels.db import database_sync_to_async
|
||||
from channels.layers import get_channel_layer
|
||||
from asgiref.sync import sync_to_async, async_to_sync
|
||||
from langchain_core.messages import HumanMessage, AIMessage
|
||||
from langchain_community.vectorstores import Chroma
|
||||
from langchain_community.embeddings import OllamaEmbeddings
|
||||
from langchain_community.tools import DuckDuckGoSearchRun
|
||||
from langchain_core.runnables import RunnableLambda, RunnableBranch, RunnablePassthrough
|
||||
from langchain_core.tracers.context import collect_runs
|
||||
|
||||
from .models import Conversation, Prompt, PromptMetric, DocumentWorkspace, Document, CustomUser
|
||||
from .serializers import PromptSerializer
|
||||
from .services.llm_service import AsyncLLMService
|
||||
from .services.rag_services import AsyncRAGService
|
||||
from .services.title_generator import title_generator
|
||||
from .services.moderation_classifier import moderation_classifier, ModerationLabel
|
||||
from .services.prompt_classifier.prompt_classifier import PromptClassifier, PromptType
|
||||
from .services.data_analysis_service import AsyncDataAnalysisService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CHANNEL_NAME: str = "llm_messages"
|
||||
MODEL_NAME: str = "llama3.2"
|
||||
PROMPT_CLASSIFIER = PromptClassifier()
|
||||
|
||||
@database_sync_to_async
|
||||
def create_conversation(prompt, email, title):
|
||||
# return the conversation id
|
||||
conversation = Conversation.objects.create(title=title)
|
||||
conversation.save()
|
||||
|
||||
user = CustomUser.objects.get(email=email)
|
||||
conversation.user_id = user.id
|
||||
conversation.save()
|
||||
return conversation.id
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def get_workspace(conversation_id):
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
return DocumentWorkspace.objects.get(company=conversation.user.company)
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def get_messages(conversation_id, prompt, file_string: str = None, file_type: str = ""):
|
||||
messages = []
|
||||
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
logger.debug(file_string)
|
||||
|
||||
# add the prompt to the conversation
|
||||
serializer = PromptSerializer(
|
||||
data={
|
||||
"message": prompt,
|
||||
"user_created": True,
|
||||
"created": timezone.now(),
|
||||
}
|
||||
)
|
||||
if serializer.is_valid(raise_exception=True):
|
||||
prompt_instance = serializer.save()
|
||||
prompt_instance.conversation_id = conversation.id
|
||||
prompt_instance.save()
|
||||
if file_string:
|
||||
file_name = f"prompt_{prompt_instance.id}_data.{file_type}"
|
||||
f = ContentFile(file_string, name=file_name)
|
||||
prompt_instance.file.save(file_name, f)
|
||||
prompt_instance.file_type = file_type
|
||||
prompt_instance.save()
|
||||
|
||||
for prompt_obj in Prompt.objects.filter(conversation__id=conversation_id):
|
||||
messages.append(
|
||||
{
|
||||
"content": prompt_obj.message,
|
||||
"role": "user" if prompt_obj.user_created else "assistant",
|
||||
"has_file": prompt_obj.file_exists(),
|
||||
"file": prompt_obj.file if prompt_obj.file_exists() else None,
|
||||
"file_type": prompt_obj.file_type if prompt_obj.file_exists() else None,
|
||||
}
|
||||
)
|
||||
|
||||
# now transform the messages
|
||||
transformed_messages = []
|
||||
for message in messages:
|
||||
|
||||
if message["has_file"] and message["file_type"] != None:
|
||||
if "csv" in message["file_type"]:
|
||||
file_type = "csv"
|
||||
altered_message = f"{message['content']}\n The file type is csv and the file contents are: {message['file'].read()}"
|
||||
elif "xlsx" in message["file_type"]:
|
||||
file_type = "xlsx"
|
||||
df = pd.read_excel(message["file"].read())
|
||||
altered_message = f"{message['content']}\n The file type is xlsx and the file contents are: {df}"
|
||||
elif "txt" in message["file_type"]:
|
||||
file_type = "txt"
|
||||
altered_message = f"{message['content']}\n The file type is csv and the file contents are: {message['file'].read()}"
|
||||
else:
|
||||
altered_message = message["content"]
|
||||
else:
|
||||
altered_message = message["content"]
|
||||
|
||||
transformed_message = (
|
||||
AIMessage(content=altered_message)
|
||||
if message["role"] == "assistant"
|
||||
else HumanMessage(content=altered_message)
|
||||
)
|
||||
transformed_messages.append(transformed_message)
|
||||
|
||||
return transformed_messages, prompt_instance
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def save_generated_message(conversation_id, message):
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
|
||||
# add the prompt to the conversation
|
||||
serializer = PromptSerializer(
|
||||
data={
|
||||
"message": message,
|
||||
"user_created": False,
|
||||
"created": timezone.now(),
|
||||
}
|
||||
)
|
||||
if serializer.is_valid():
|
||||
prompt_instance = serializer.save()
|
||||
prompt_instance.conversation_id = conversation.id
|
||||
prompt_instance.save()
|
||||
else:
|
||||
logger.error(serializer.errors)
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def create_prompt_metric(
|
||||
prompt_id, prompt, has_file, file_type, model_name, conversation_id
|
||||
):
|
||||
prompt_metric = PromptMetric.objects.create(
|
||||
prompt_id=prompt_id,
|
||||
start_time=timezone.now(),
|
||||
prompt_length=len(prompt),
|
||||
has_file=has_file,
|
||||
file_type=file_type,
|
||||
model_name=model_name,
|
||||
conversation_id=conversation_id,
|
||||
)
|
||||
prompt_metric.save()
|
||||
return prompt_metric
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def update_prompt_metric(prompt_metric, status):
|
||||
prompt_metric.event = status
|
||||
prompt_metric.save()
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def finish_prompt_metric(prompt_metric, response_length):
|
||||
logger.info(f"finish_prompt_metric: {response_length}")
|
||||
prompt_metric.end_time = timezone.now()
|
||||
prompt_metric.reponse_length = response_length
|
||||
prompt_metric.event = "FINISHED"
|
||||
prompt_metric.save(update_fields=["end_time", "reponse_length", "event"])
|
||||
logger.info("finish_prompt_metric saved")
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def get_retriever(conversation_id):
|
||||
logger.info(f"getting workspace from conversation: {conversation_id}")
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
logger.info(f"Got conversation: {conversation}")
|
||||
workspace = DocumentWorkspace.objects.get(company=conversation.user.company)
|
||||
logger.info(f"Got workspace: {conversation}")
|
||||
vectorstore = Chroma(
|
||||
persist_directory=f"./chroma_db/",
|
||||
embedding=OllamaEmbeddings(model="llama3.2"),
|
||||
)
|
||||
return vectorstore.as_retriever()
|
||||
|
||||
async def get_conversation_file_async(conversation_id):
|
||||
try:
|
||||
# Get the very first prompt in the conversation that has a file
|
||||
prompt_with_file = await Prompt.objects.filter(
|
||||
conversation_id=conversation_id
|
||||
).exclude(file='').order_by('created').afirst()
|
||||
|
||||
if prompt_with_file and prompt_with_file.file:
|
||||
# You must use sync_to_async to access the file's binary content
|
||||
file_data = await sync_to_async(prompt_with_file.file.read)()
|
||||
file_type = prompt_with_file.file_type
|
||||
return file_data, file_type
|
||||
except Exception as e:
|
||||
logger.error(f"Error retrieving file from conversation history: {e}")
|
||||
return None, None
|
||||
|
||||
class ChatConsumerAgain(AsyncWebsocketConsumer):
|
||||
async def connect(self):
|
||||
await self.accept()
|
||||
|
||||
async def disconnect(self, close_code):
|
||||
await self.close()
|
||||
|
||||
async def send_json_message(self, data_str):
|
||||
"""
|
||||
Ensures that the message sent over the websocket is a valid JSON object.
|
||||
If data_str is not valid JSON, it is forwarded unchanged as a plain-text chunk.
|
||||
"""
|
||||
try:
|
||||
# Test if it's already a valid JSON object string
|
||||
json.loads(data_str)
|
||||
# If it is, send it as is
|
||||
await self.send(data_str)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
# If it's a plain string or not JSON-decodable, forward it as-is
|
||||
await self.send(data_str)
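# Illustrative behaviour of the two paths above (not part of this commit): a JSON
# payload such as '{"type": "plot", "format": "png", "image": "..."}' passes
# json.loads and is forwarded untouched, while a plain streamed token like "Hello"
# raises JSONDecodeError and is likewise sent unchanged as a text frame.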
|
||||
|
||||
async def receive(self, text_data=None, bytes_data=None):
|
||||
logger.debug(f"Text Data: {text_data}")
|
||||
logger.debug(f"Bytes Data: {bytes_data}")
|
||||
if text_data:
|
||||
data = json.loads(text_data)
|
||||
message = data.get("message", None)
|
||||
conversation_id = data.get("conversation_id", None)
|
||||
email = data.get("email", None)
|
||||
file = data.get("file", None)
|
||||
file_type = data.get("fileType", "")
|
||||
model = data.get("modelName", "Turbo")
|
||||
|
||||
if not conversation_id:
|
||||
# we need to create a new conversation
|
||||
# we will generate a name for it too
|
||||
title = await title_generator.generate_async(message)
|
||||
conversation_id = await create_conversation(message, email, title)
|
||||
|
||||
if conversation_id:
|
||||
decoded_file = None
|
||||
|
||||
if file:
|
||||
decoded_file = base64.b64decode(file)
|
||||
logger.debug(decoded_file)
|
||||
# The `altered_message` should only be created if a file exists
|
||||
# and you want to pass its content directly to the classifier.
|
||||
# Here, we'll let the classifier decide based on the user's prompt
|
||||
# and then handle the file content separately.
|
||||
altered_message = message
|
||||
if "csv" in file_type:
|
||||
file_type = "csv"
|
||||
#altered_message = f"{message}\n The file type is csv and the file contents are: {decoded_file}"
|
||||
elif "xmlformats-officedocument" in file_type:
|
||||
file_type = "xlsx"
|
||||
#df = pd.read_excel(decoded_file)
|
||||
#altered_message = f"{message}\n The file type is xlsx and the file contents are: {df}"
|
||||
elif "word" in file_type:
|
||||
file_type = "docx"
|
||||
elif "pdf" in file_type:
|
||||
file_type = "pdf"
|
||||
elif "text" in file_type:
|
||||
file_type = "txt"
|
||||
#altered_message = f"{message}\n The file type is txt and the file contents are: {decoded_file}"
|
||||
else:
|
||||
file_type = "Not Sure"
|
||||
|
||||
logger.info(f'received: "{message}" for conversation {conversation_id}')
|
||||
|
||||
# --- LangSmith Pipeline Construction ---
|
||||
|
||||
async def check_moderation(input_dict):
|
||||
msg = input_dict["message"]
|
||||
label = await moderation_classifier.classify_async(msg)
|
||||
return {**input_dict, "moderation_label": label}
|
||||
|
||||
async def classify_prompt_step(input_dict):
|
||||
if input_dict["moderation_label"] == ModerationLabel.NSFW:
|
||||
return {**input_dict, "prompt_type": None} # Skip classification
|
||||
|
||||
msg = input_dict["message"]
|
||||
decoded_file = input_dict.get("decoded_file")
|
||||
|
||||
prompt_type = await PROMPT_CLASSIFIER.classify_async(msg)
|
||||
|
||||
# Override logic
|
||||
if decoded_file and (prompt_type == PromptType.DATA_ANALYSIS or 'analyze' in msg.lower() or 'data' in msg.lower()):
|
||||
prompt_type = PromptType.DATA_ANALYSIS
|
||||
elif decoded_file:
|
||||
prompt_type = PromptType.GENERAL_CHAT
|
||||
|
||||
return {**input_dict, "prompt_type": prompt_type}
|
||||
|
||||
async def generate_response_step(input_dict):
|
||||
if input_dict["moderation_label"] == ModerationLabel.NSFW:
|
||||
response = "Prompt has been marked as NSFW. If this is in error, submit a feedback with the prompt text."
|
||||
return {"type": "error", "content": response}
|
||||
|
||||
prompt_type = input_dict["prompt_type"]
|
||||
messages = input_dict["messages"]
|
||||
prompt_instance = input_dict["prompt_instance"]
|
||||
conversation_id = input_dict["conversation_id"]
|
||||
decoded_file = input_dict.get("decoded_file")
|
||||
file_type = input_dict.get("file_type")
|
||||
|
||||
# Feature Flag: Image Generation
|
||||
if prompt_type == PromptType.IMAGE_GENERATION:
|
||||
if not getattr(settings, "ALLOW_IMAGE_GENERATION", False):
|
||||
return {"type": "text", "content": "Image Generation is disabled."}
|
||||
# If enabled, proceed (assuming implementation exists, but user said "have it set to false for now")
|
||||
return {"type": "text", "content": "Image Generation is not supported at this time, but it will be soon."}
|
||||
|
||||
if prompt_type == PromptType.SEARCH:
|
||||
if getattr(settings, "ALLOW_INTERNET_ACCESS", False):
|
||||
try:
|
||||
search = DuckDuckGoSearchRun()
|
||||
search_results = search.run(input_dict["message"])
|
||||
messages.append(HumanMessage(content=f"Search Results: {search_results}"))
|
||||
except Exception as e:
|
||||
logger.error(f"Search failed: {e}")
|
||||
# If search fails, we proceed without it, essentially falling back to general chat
|
||||
pass
|
||||
else:
|
||||
# If search is disabled, we could notify the user, but for now we'll just proceed
|
||||
# potentially adding a system message or just letting the LLM handle it with its training data
|
||||
pass
|
||||
|
||||
if prompt_type == PromptType.RAG:
|
||||
service = AsyncRAGService()
|
||||
workspace = await get_workspace(conversation_id)
|
||||
return service.generate_response(messages, prompt_instance.message, workspace)
|
||||
|
||||
elif prompt_type == PromptType.DATA_ANALYSIS:
|
||||
service = AsyncDataAnalysisService()
|
||||
logger.debug(file_type)
|
||||
if not decoded_file:
|
||||
return {"type": "text", "content": "Please upload a file to perform data analysis."}
|
||||
return service.generate_response(prompt_instance.message, decoded_file, file_type)
|
||||
|
||||
else: # GENERAL_CHAT or others
|
||||
service = AsyncLLMService()
|
||||
return service.generate_response(messages, prompt_instance.message, conversation_id)
|
||||
|
||||
# --- Execution ---
|
||||
|
||||
# Pre-fetch messages and file
|
||||
messages, prompt_instance = await get_messages(
|
||||
conversation_id, message, decoded_file, file_type
|
||||
)
|
||||
if not decoded_file:
|
||||
decoded_file, file_type = await get_conversation_file_async(conversation_id)
|
||||
|
||||
if file:
    # File contents are already attached to the prompt inside get_messages, so no
    # extra rewriting is needed here. The previous consumer replaced the last
    # message with an "altered_message" that embedded the file contents:
    #     messages = messages[:-1] + [HumanMessage(content=altered_message)]
    # Reintroduce that override here only if the old behaviour is required.
    pass
|
||||
|
||||
prompt_metric = await create_prompt_metric(
|
||||
prompt_instance.id,
|
||||
prompt_instance.message,
|
||||
True if file else False,
|
||||
file_type,
|
||||
MODEL_NAME,
|
||||
conversation_id,
|
||||
)
|
||||
|
||||
pipeline_input = {
|
||||
"message": message,
|
||||
"conversation_id": conversation_id,
|
||||
"decoded_file": decoded_file,
|
||||
"file_type": file_type,
|
||||
"messages": messages,
|
||||
"prompt_instance": prompt_instance
|
||||
}
|
||||
|
||||
# Run the pipeline steps manually to handle the async generator return type of generate_response_step
|
||||
# A pure RunnableSequence might struggle with the async generator return.
|
||||
# So I'll chain them in python but conceptually it's one pipeline.
|
||||
|
||||
step1 = await check_moderation(pipeline_input)
|
||||
step2 = await classify_prompt_step(step1)
|
||||
|
||||
# Send start markers
|
||||
await self.send("CONVERSATION_ID")
|
||||
await self.send(str(conversation_id))
|
||||
await self.send("START_OF_THE_STREAM_ENDER_GAME_42")
|
||||
|
||||
response_generator_or_dict = await generate_response_step(step2)
|
||||
|
||||
full_response = ""
|
||||
|
||||
if isinstance(response_generator_or_dict, dict):
|
||||
# It's an error or simple message
|
||||
content = response_generator_or_dict.get("content", "")
|
||||
await self.send_json_message(json.dumps(response_generator_or_dict))
|
||||
full_response = content
|
||||
else:
|
||||
# It's an async generator
|
||||
async for chunk in response_generator_or_dict:
|
||||
full_response += chunk
|
||||
await self.send_json_message(chunk)
|
||||
|
||||
await self.send("END_OF_THE_STREAM_ENDER_GAME_42")
|
||||
|
||||
await save_generated_message(conversation_id, full_response)
|
||||
await finish_prompt_metric(prompt_metric, len(full_response))
|
||||
|
||||
if bytes_data:
|
||||
logger.info("we have byte data")
|
||||
llm_be/chat_backend/consumers_graph.py | 357 (new file)
@@ -0,0 +1,357 @@
|
||||
import json
|
||||
import base64
|
||||
import logging
|
||||
import pandas as pd
|
||||
from datetime import datetime
|
||||
from typing import TypedDict, Annotated, List, Union, Dict, Any
|
||||
from django.utils import timezone
|
||||
from django.conf import settings
|
||||
from django.core.files.base import ContentFile
|
||||
from channels.generic.websocket import AsyncWebsocketConsumer
|
||||
from channels.db import database_sync_to_async
from asgiref.sync import sync_to_async
|
||||
from langchain_core.messages import HumanMessage, AIMessage, BaseMessage
|
||||
from langchain_community.tools import DuckDuckGoSearchRun
|
||||
from langgraph.graph import StateGraph, END
|
||||
|
||||
from .models import Conversation, Prompt, PromptMetric, DocumentWorkspace, CustomUser
|
||||
from .serializers import PromptSerializer
|
||||
from .services.llm_service import AsyncLLMService
|
||||
from .services.rag_services import AsyncRAGService
|
||||
from .services.title_generator import title_generator
|
||||
from .services.moderation_classifier import moderation_classifier, ModerationLabel
|
||||
from .services.prompt_classifier.prompt_classifier import PromptClassifier, PromptType
|
||||
from .services.data_analysis_service import AsyncDataAnalysisService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CHANNEL_NAME: str = "llm_messages"
|
||||
MODEL_NAME: str = "llama3.2"
|
||||
PROMPT_CLASSIFIER = PromptClassifier()
|
||||
|
||||
# --- Database Helpers (Reused) ---
|
||||
|
||||
@database_sync_to_async
|
||||
def create_conversation(prompt, email, title):
|
||||
conversation = Conversation.objects.create(title=title)
|
||||
user = CustomUser.objects.get(email=email)
|
||||
conversation.user_id = user.id
|
||||
conversation.save()
|
||||
return conversation.id
|
||||
|
||||
@database_sync_to_async
|
||||
def get_workspace(conversation_id):
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
return DocumentWorkspace.objects.get(company=conversation.user.company)
|
||||
|
||||
@database_sync_to_async
|
||||
def get_messages(conversation_id, prompt, file_string: str = None, file_type: str = ""):
|
||||
messages = []
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
|
||||
serializer = PromptSerializer(
|
||||
data={
|
||||
"message": prompt,
|
||||
"user_created": True,
|
||||
"created": timezone.now(),
|
||||
}
|
||||
)
|
||||
if serializer.is_valid(raise_exception=True):
|
||||
prompt_instance = serializer.save()
|
||||
prompt_instance.conversation_id = conversation.id
|
||||
prompt_instance.save()
|
||||
if file_string:
|
||||
file_name = f"prompt_{prompt_instance.id}_data.{file_type}"
|
||||
f = ContentFile(file_string, name=file_name)
|
||||
prompt_instance.file.save(file_name, f)
|
||||
prompt_instance.file_type = file_type
|
||||
prompt_instance.save()
|
||||
|
||||
for prompt_obj in Prompt.objects.filter(conversation__id=conversation_id):
|
||||
messages.append(
|
||||
{
|
||||
"content": prompt_obj.message,
|
||||
"role": "user" if prompt_obj.user_created else "assistant",
|
||||
"has_file": prompt_obj.file_exists(),
|
||||
"file": prompt_obj.file if prompt_obj.file_exists() else None,
|
||||
"file_type": prompt_obj.file_type if prompt_obj.file_exists() else None,
|
||||
}
|
||||
)
|
||||
|
||||
transformed_messages = []
|
||||
for message in messages:
|
||||
if message["has_file"] and message["file_type"] != None:
|
||||
# Simplified handling compared to original, as we rely on services to handle files now
|
||||
# But we keep the structure for context
|
||||
altered_message = message["content"]
|
||||
else:
|
||||
altered_message = message["content"]
|
||||
|
||||
transformed_message = (
|
||||
AIMessage(content=altered_message)
|
||||
if message["role"] == "assistant"
|
||||
else HumanMessage(content=altered_message)
|
||||
)
|
||||
transformed_messages.append(transformed_message)
|
||||
|
||||
return transformed_messages, prompt_instance
|
||||
|
||||
@database_sync_to_async
|
||||
def save_generated_message(conversation_id, message):
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
serializer = PromptSerializer(
|
||||
data={
|
||||
"message": message,
|
||||
"user_created": False,
|
||||
"created": timezone.now(),
|
||||
}
|
||||
)
|
||||
if serializer.is_valid():
|
||||
prompt_instance = serializer.save()
|
||||
prompt_instance.conversation_id = conversation.id
|
||||
prompt_instance.save()
|
||||
else:
|
||||
print(serializer.errors)
|
||||
|
||||
@database_sync_to_async
|
||||
def create_prompt_metric(prompt_id, prompt, has_file, file_type, model_name, conversation_id):
|
||||
prompt_metric = PromptMetric.objects.create(
|
||||
prompt_id=prompt_id,
|
||||
start_time=timezone.now(),
|
||||
prompt_length=len(prompt),
|
||||
has_file=has_file,
|
||||
file_type=file_type,
|
||||
model_name=model_name,
|
||||
conversation_id=conversation_id,
|
||||
)
|
||||
return prompt_metric
|
||||
|
||||
@database_sync_to_async
|
||||
def finish_prompt_metric(prompt_metric, response_length):
|
||||
prompt_metric.end_time = timezone.now()
|
||||
prompt_metric.reponse_length = response_length
|
||||
prompt_metric.event = "FINISHED"
|
||||
prompt_metric.save(update_fields=["end_time", "reponse_length", "event"])
|
||||
|
||||
async def get_conversation_file_async(conversation_id):
|
||||
try:
|
||||
prompt_with_file = await Prompt.objects.filter(
|
||||
conversation_id=conversation_id
|
||||
).exclude(file='').order_by('created').afirst()
|
||||
|
||||
if prompt_with_file and prompt_with_file.file:
|
||||
file_data = await sync_to_async(prompt_with_file.file.read)()
|
||||
file_type = prompt_with_file.file_type
|
||||
return file_data, file_type
|
||||
except Exception as e:
|
||||
logger.error(f"Error retrieving file from conversation history: {e}")
|
||||
return None, None
|
||||
|
||||
|
||||
# --- LangGraph State ---
|
||||
|
||||
class ChatState(TypedDict):
|
||||
message: str
|
||||
conversation_id: int
|
||||
decoded_file: Union[bytes, None]
|
||||
file_type: Union[str, None]
|
||||
messages: List[BaseMessage]
|
||||
prompt_instance: Any # Django model instance
|
||||
moderation_label: Union[ModerationLabel, None]
|
||||
prompt_type: Union[PromptType, None]
|
||||
response_generator: Any # AsyncGenerator or dict
|
||||
error: Union[str, None]
|
||||
|
||||
|
||||
# --- LangGraph Nodes ---
|
||||
|
||||
async def moderation_node(state: ChatState) -> ChatState:
|
||||
msg = state["message"]
|
||||
label = await moderation_classifier.classify_async(msg)
|
||||
return {"moderation_label": label}
|
||||
|
||||
async def classification_node(state: ChatState) -> ChatState:
|
||||
if state.get("moderation_label") == ModerationLabel.NSFW:
|
||||
return {"prompt_type": None}
|
||||
|
||||
msg = state["message"]
|
||||
decoded_file = state.get("decoded_file")
|
||||
|
||||
prompt_type = await PROMPT_CLASSIFIER.classify_async(msg)
|
||||
|
||||
# Override logic
|
||||
if decoded_file and (prompt_type == PromptType.DATA_ANALYSIS or 'analyze' in msg.lower() or 'data' in msg.lower()):
|
||||
prompt_type = PromptType.DATA_ANALYSIS
|
||||
elif decoded_file:
|
||||
prompt_type = PromptType.GENERAL_CHAT
|
||||
|
||||
return {"prompt_type": prompt_type}
|
||||
|
||||
async def generation_node(state: ChatState) -> ChatState:
|
||||
if state.get("moderation_label") == ModerationLabel.NSFW:
|
||||
response = "Prompt has been marked as NSFW. If this is in error, submit a feedback with the prompt text."
|
||||
return {"response_generator": {"type": "error", "content": response}}
|
||||
|
||||
prompt_type = state["prompt_type"]
|
||||
messages = state["messages"]
|
||||
prompt_instance = state["prompt_instance"]
|
||||
conversation_id = state["conversation_id"]
|
||||
decoded_file = state.get("decoded_file")
|
||||
file_type = state.get("file_type")
|
||||
|
||||
# Feature Flag: Image Generation
|
||||
if prompt_type == PromptType.IMAGE_GENERATION:
|
||||
if not getattr(settings, "ALLOW_IMAGE_GENERATION", False):
|
||||
return {"response_generator": {"type": "text", "content": "Image Generation is disabled."}}
|
||||
return {"response_generator": {"type": "text", "content": "Image Generation is not supported at this time, but it will be soon."}}
|
||||
|
||||
# Feature Flag: Internet Access
|
||||
if prompt_type == PromptType.SEARCH:
|
||||
if getattr(settings, "ALLOW_INTERNET_ACCESS", False):
|
||||
try:
|
||||
search = DuckDuckGoSearchRun()
|
||||
search_results = search.run(state["message"])
|
||||
messages.append(HumanMessage(content=f"Search Results: {search_results}"))
|
||||
except Exception as e:
|
||||
logger.error(f"Search failed: {e}")
|
||||
pass
|
||||
else:
|
||||
pass
|
||||
|
||||
if prompt_type == PromptType.RAG:
|
||||
service = AsyncRAGService()
|
||||
workspace = await get_workspace(conversation_id)
|
||||
generator = service.generate_response(messages, prompt_instance.message, workspace)
|
||||
return {"response_generator": generator}
|
||||
|
||||
elif prompt_type == PromptType.DATA_ANALYSIS:
|
||||
service = AsyncDataAnalysisService()
|
||||
if not decoded_file:
|
||||
return {"response_generator": {"type": "text", "content": "Please upload a file to perform data analysis."}}
|
||||
generator = service.generate_response(prompt_instance.message, decoded_file, file_type)
|
||||
return {"response_generator": generator}
|
||||
|
||||
else: # GENERAL_CHAT or others
|
||||
service = AsyncLLMService()
|
||||
generator = service.generate_response(messages, prompt_instance.message, conversation_id)
|
||||
return {"response_generator": generator}
|
||||
|
||||
|
||||
# --- LangGraph Definition ---
|
||||
|
||||
workflow = StateGraph(ChatState)
|
||||
|
||||
workflow.add_node("moderation", moderation_node)
|
||||
workflow.add_node("classification", classification_node)
|
||||
workflow.add_node("generation", generation_node)
|
||||
|
||||
workflow.set_entry_point("moderation")
|
||||
|
||||
workflow.add_edge("moderation", "classification")
|
||||
workflow.add_edge("classification", "generation")
|
||||
workflow.add_edge("generation", END)
|
||||
|
||||
app = workflow.compile()
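# For reference only (a sketch, not part of this commit): the NSFW short-circuit that
# the nodes check internally could instead be routed at the graph level with LangGraph
# conditional edges, so a flagged prompt skips classification entirely. Node names are
# the ones defined above; the mapping keys are illustrative.
#
#   workflow.add_conditional_edges(
#       "moderation",
#       lambda state: "blocked" if state.get("moderation_label") == ModerationLabel.NSFW else "ok",
#       {"blocked": "generation", "ok": "classification"},
#   )
#
# The compiled `app` above keeps the simple linear moderation -> classification ->
# generation flow.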
|
||||
|
||||
|
||||
# --- Consumer ---
|
||||
|
||||
class ChatConsumerGraph(AsyncWebsocketConsumer):
|
||||
async def connect(self):
|
||||
await self.accept()
|
||||
|
||||
async def disconnect(self, close_code):
|
||||
await self.close()
|
||||
|
||||
async def send_json_message(self, data_str):
|
||||
try:
|
||||
json.loads(data_str)
|
||||
await self.send(data_str)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
await self.send(data_str)
|
||||
|
||||
async def receive(self, text_data=None, bytes_data=None):
|
||||
logger.debug(f"Text Data: {text_data}")
|
||||
print("Text Data: ", text_data)
|
||||
if text_data:
|
||||
data = json.loads(text_data)
|
||||
message = data.get("message", None)
|
||||
conversation_id = data.get("conversation_id", None)
|
||||
email = data.get("email", None)
|
||||
file = data.get("file", None)
|
||||
file_type = data.get("fileType", "")
|
||||
|
||||
if not conversation_id:
|
||||
title = await title_generator.generate_async(message)
|
||||
conversation_id = await create_conversation(message, email, title)
|
||||
|
||||
if conversation_id:
|
||||
print("Conversation ID: ", conversation_id)
|
||||
decoded_file = None
|
||||
if file:
|
||||
decoded_file = base64.b64decode(file)
|
||||
if "csv" in file_type: file_type = "csv"
|
||||
elif "xmlformats-officedocument" in file_type: file_type = "xlsx"
|
||||
elif "word" in file_type: file_type = "docx"
|
||||
elif "pdf" in file_type: file_type = "pdf"
|
||||
elif "text" in file_type: file_type = "txt"
|
||||
else: file_type = "Not Sure"
|
||||
|
||||
# Pre-fetch messages and file
|
||||
messages, prompt_instance = await get_messages(
|
||||
conversation_id, message, decoded_file, file_type
|
||||
)
|
||||
print("Messages: ", messages)
|
||||
if not decoded_file:
|
||||
decoded_file, file_type = await get_conversation_file_async(conversation_id)
|
||||
|
||||
prompt_metric = await create_prompt_metric(
|
||||
prompt_instance.id,
|
||||
prompt_instance.message,
|
||||
True if file else False,
|
||||
file_type,
|
||||
MODEL_NAME,
|
||||
conversation_id,
|
||||
)
|
||||
|
||||
# Initialize State
|
||||
initial_state = {
|
||||
"message": message,
|
||||
"conversation_id": conversation_id,
|
||||
"decoded_file": decoded_file,
|
||||
"file_type": file_type,
|
||||
"messages": messages,
|
||||
"prompt_instance": prompt_instance,
|
||||
"moderation_label": None,
|
||||
"prompt_type": None,
|
||||
"response_generator": None,
|
||||
"error": None
|
||||
}
|
||||
print("Initial State: ", initial_state)
|
||||
|
||||
# Run Graph
|
||||
final_state = await app.ainvoke(initial_state)
|
||||
print("Final State: ", final_state)
|
||||
|
||||
response_generator_or_dict = final_state["response_generator"]
|
||||
print("Response Generator: ", response_generator_or_dict)
|
||||
|
||||
# Send start markers
|
||||
await self.send("CONVERSATION_ID")
|
||||
await self.send(str(conversation_id))
|
||||
await self.send("START_OF_THE_STREAM_ENDER_GAME_42")
|
||||
|
||||
full_response = ""
|
||||
|
||||
if isinstance(response_generator_or_dict, dict):
|
||||
content = response_generator_or_dict.get("content", "")
|
||||
await self.send_json_message(json.dumps(response_generator_or_dict))
|
||||
full_response = content
|
||||
else:
|
||||
async for chunk in response_generator_or_dict:
|
||||
full_response += chunk
|
||||
await self.send_json_message(chunk)
|
||||
|
||||
await self.send("END_OF_THE_STREAM_ENDER_GAME_42")
|
||||
|
||||
await save_generated_message(conversation_id, full_response)
|
||||
await finish_prompt_metric(prompt_metric, len(full_response))
|
||||
@@ -1,6 +1,8 @@
|
||||
from django.urls import re_path
|
||||
from .views import ChatConsumerAgain
|
||||
from .consumers import ChatConsumerAgain
|
||||
from .consumers_graph import ChatConsumerGraph
|
||||
|
||||
websocket_urlpatterns = [
|
||||
re_path(r"ws/chat_again/$", ChatConsumerAgain.as_asgi()),
|
||||
re_path(r"ws/conditional_chat/$", ChatConsumerGraph.as_asgi()),
|
||||
]
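For context, both consumers frame their stream with literal marker frames rather than a JSON envelope: "CONVERSATION_ID", the conversation id, "START_OF_THE_STREAM_ENDER_GAME_42", the streamed chunks, then "END_OF_THE_STREAM_ENDER_GAME_42". A minimal client sketch for the ws/conditional_chat/ route is shown below; the `websockets` package, host, and port are assumptions for illustration and not part of this commit.

import asyncio
import json
import websockets  # assumed client-side dependency

async def ask(message, email, conversation_id=None):
    # Host/port are placeholders for wherever the Channels app is served.
    async with websockets.connect("ws://localhost:8000/ws/conditional_chat/") as ws:
        await ws.send(json.dumps({
            "message": message,
            "conversation_id": conversation_id,
            "email": email,
            "file": None,
            "fileType": "",
        }))
        chunks = []
        while True:
            frame = await ws.recv()
            if frame == "CONVERSATION_ID":
                conversation_id = await ws.recv()   # next frame carries the id
            elif frame == "START_OF_THE_STREAM_ENDER_GAME_42":
                continue                            # stream begins
            elif frame == "END_OF_THE_STREAM_ENDER_GAME_42":
                break                               # stream finished
            else:
                chunks.append(frame)                # streamed text or JSON chunk
        return conversation_id, "".join(chunks)

# asyncio.run(ask("Tell me a joke", "user@example.com"))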
|
||||
|
||||
@@ -8,6 +8,8 @@ from typing import AsyncGenerator
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_ollama import OllamaLLM
|
||||
from langchain_core.output_parsers import StrOutputParser
|
||||
import docx
|
||||
import pypdf
|
||||
|
||||
|
||||
class AsyncDataAnalysisService:
|
||||
@@ -25,14 +27,14 @@ class AsyncDataAnalysisService:
|
||||
|
||||
def _setup_chain(self):
|
||||
"""Set up the LLM chain with a prompt tailored for data analysis."""
|
||||
template = """You are an expert data analyst. Your role is to directly answer a user's question about a dataset they have provided.
|
||||
You will be given a summary and a sample of the dataset.
|
||||
template = """You are an expert data analyst. Your role is to directly answer a user's question about a dataset or document they have provided.
|
||||
You will be given a summary and a sample of the dataset, or the content of the document.
|
||||
Based on this information, provide a clear and concise answer to the user's question.
|
||||
Do not provide Python code or any other code. The user is not a developer and wants a direct answer.
|
||||
Even if you don't think the data provides enough evidence for the query, still provide a response
|
||||
|
||||
---
|
||||
Data Summary:
|
||||
Data/Document Content:
|
||||
{data_summary}
|
||||
---
|
||||
|
||||
@@ -77,6 +79,22 @@ Answer:"""
|
||||
|
||||
return "\n".join(summary_lines)
|
||||
|
||||
def _read_docx(self, file_bytes: bytes) -> str:
|
||||
"""Reads text from a DOCX file."""
|
||||
doc = docx.Document(io.BytesIO(file_bytes))
|
||||
full_text = []
|
||||
for para in doc.paragraphs:
|
||||
full_text.append(para.text)
|
||||
return "\n".join(full_text)
|
||||
|
||||
def _read_pdf(self, file_bytes: bytes) -> str:
|
||||
"""Reads text from a PDF file."""
|
||||
pdf_reader = pypdf.PdfReader(io.BytesIO(file_bytes))
|
||||
full_text = []
|
||||
for page in pdf_reader.pages:
|
||||
full_text.append(page.extract_text())
|
||||
return "\n".join(full_text)
|
||||
|
||||
def _generate_plot(self, query: str, df: pd.DataFrame) -> str:
|
||||
"""
|
||||
Generates a plot from a DataFrame based on a natural language query,
|
||||
@@ -132,28 +150,40 @@ Answer:"""
|
||||
This can be a text analysis or a plot visualization.
|
||||
"""
|
||||
try:
|
||||
if file_type == "csv":
|
||||
df = None
|
||||
data_summary = ""
|
||||
file_type = file_type.lower()
|
||||
print(file_type)
|
||||
|
||||
if "csv" in file_type:
|
||||
df = pd.read_csv(io.BytesIO(decoded_file))
|
||||
elif file_type == "xlsx":
|
||||
data_summary = self._get_dataframe_summary(df)
|
||||
elif "xlsx" in file_type or "spreadsheet" in file_type:
|
||||
df = pd.read_excel(io.BytesIO(decoded_file))
|
||||
data_summary = self._get_dataframe_summary(df)
|
||||
elif "word" in file_type or "docx" in file_type:
|
||||
data_summary = self._read_docx(decoded_file)
|
||||
elif "pdf" in file_type:
|
||||
data_summary = self._read_pdf(decoded_file)
|
||||
else:
|
||||
yield json.dumps({"type": "error", "content": "I can only analyze CSV and XLSX files."})
|
||||
yield json.dumps({"type": "error", "content": f"Unsupported file type: {file_type}. I can analyze CSV, XLSX, DOCX, and PDF files."})
|
||||
return
|
||||
|
||||
plot_keywords = ["plot", "graph", "scatter", "visualize"]
|
||||
if any(keyword in query.lower() for keyword in plot_keywords):
|
||||
try:
|
||||
image_base64 = self._generate_plot(query, df)
|
||||
yield json.dumps({
|
||||
"type": "plot",
|
||||
"format": "png",
|
||||
"image": image_base64
|
||||
})
|
||||
except ValueError as e:
|
||||
yield json.dumps({"type": "error", "content": str(e)})
|
||||
return
|
||||
# Only attempt plotting if we have a DataFrame
|
||||
if df is not None:
|
||||
plot_keywords = ["plot", "graph", "scatter", "visualize"]
|
||||
if any(keyword in query.lower() for keyword in plot_keywords):
|
||||
try:
|
||||
image_base64 = self._generate_plot(query, df)
|
||||
yield json.dumps({
|
||||
"type": "plot",
|
||||
"format": "png",
|
||||
"image": image_base64
|
||||
})
|
||||
except ValueError as e:
|
||||
yield json.dumps({"type": "error", "content": str(e)})
|
||||
return
|
||||
|
||||
data_summary = self._get_dataframe_summary(df)
|
||||
chain_input = {"data_summary": data_summary, "query": query}
|
||||
|
||||
async for chunk in self.analysis_chain.astream(chain_input):
|
||||
|
||||
@@ -10,6 +10,7 @@ class PromptType(Enum):
|
||||
RAG = auto()
|
||||
IMAGE_GENERATION = auto()
|
||||
DATA_ANALYSIS = auto()
|
||||
SEARCH = auto()
|
||||
UNKNOWN = auto()
|
||||
|
||||
|
||||
@@ -36,8 +37,9 @@ class PromptClassifier(BaseService):
|
||||
1. GENERAL_CHAT - Casual conversation, personal questions, or non-specific inquiries
|
||||
2. RAG - ONLY when explicitly requesting document/search-based knowledge
|
||||
3. IMAGE_GENERATION - Specific requests to create/modify images
|
||||
4. DATA_ANALYSIS - When a user is asking questions about an uploaded spreadsheet or CSV file. The user's message contains the data from the file.
|
||||
5. UNKNOWN - If none of the above fit
|
||||
4. DATA_ANALYSIS - When a user wants to read an uploaded document (PDF, Word, etc.) and generate an index, summary, or extract structured information. Includes prompts like "Please read this document and make me an index for it".
|
||||
5. SEARCH - When the user is seeking specific, up-to-date information (e.g., current events, celebrity news, sports scores).
|
||||
6. UNKNOWN - If none of the above fit
|
||||
|
||||
1. IMAGE_GENERATION - ONLY if:
|
||||
- Explicitly contains: "generate/create/draw/make an image/picture/photo/art/illustration"
|
||||
@@ -50,15 +52,22 @@ class PromptClassifier(BaseService):
|
||||
- Example: "What does contracts.pdf say?" → RAG
|
||||
|
||||
3. DATA_ANALYSIS - ONLY if:
|
||||
- The message explicitly contains structured data from a file (e.g., a DataFrame string)
|
||||
- The user is asking to analyze, summarize, or plot the data
|
||||
- Example: "Here is the sales data. What is the average revenue per product?" -> DATA_ANALYSIS
|
||||
- The user provides or references an uploaded document (PDF, Word, etc.) and asks for an index, summary, extraction, or analysis of its contents.
|
||||
- Example: "Please read this document and make me an index for it" → DATA_ANALYSIS
|
||||
- Example: "Here is the file content. What is the sum of all 'Sales'?" → DATA_ANALYSIS
|
||||
|
||||
4. GENERAL_CHAT - DEFAULT category when:
|
||||
5. SEARCH - ONLY if:
|
||||
- User asks for current information (news, weather, sports, stock prices)
|
||||
- User asks for specific facts that might change or require lookup (e.g. "Who won the 2024 election?")
|
||||
- Example: "What is the latest news on X?" → SEARCH
|
||||
- Example: "Who won the Super Bowl this year?" → SEARCH
|
||||
|
||||
6. GENERAL_CHAT - DEFAULT category when:
|
||||
- Doesn't meet above criteria
|
||||
- Conversational/general knowledge
|
||||
- Uncertain cases
|
||||
- Conversational/general knowledge (that doesn't require live search)
|
||||
- Creative writing (poems, jokes)
|
||||
- Example: "Tell me a joke" → GENERAL_CHAT
|
||||
- Example: "Write a poem about cats" → GENERAL_CHAT
|
||||
|
||||
Examples:
|
||||
[Definitely RAG]
|
||||
@@ -66,9 +75,15 @@ Examples:
|
||||
- "Search our documents for the 2023 marketing strategy"
|
||||
|
||||
[Definitely DATA_ANALYSIS]
|
||||
- "Please read this document and make me an index for it"
|
||||
- "Here is the file content. What is the sum of all 'Sales'?"
|
||||
- "Based on this CSV data, show me the top 5 customers."
|
||||
|
||||
[Definitely SEARCH]
|
||||
- "Who won the 2024 presidential race?"
|
||||
- "What is the latest celebrity news?"
|
||||
- "Current stock price of Apple"
|
||||
|
||||
[Definitely GENERAL_CHAT]
|
||||
- "How does photosynthesis work?" (General knowledge)
|
||||
- "Tell me a joke"
|
||||
@@ -77,7 +92,7 @@ Examples:
|
||||
[Borderline -> GENERAL_CHAT]
|
||||
- "What's our company policy on X?" (No doc reference -> general)
|
||||
|
||||
Return ONLY the label, no explanations.""",
|
||||
Return ONLY the exact Enum label (e.g. "GENERAL_CHAT"), no explanations."""
|
||||
),
|
||||
("human", "{prompt}"),
|
||||
]
|
||||
@@ -85,8 +100,40 @@ Return ONLY the label, no explanations.""",
|
||||
|
||||
self.chain = self.classification_prompt | self.llm
|
||||
|
||||
def _quick_check(self, prompt: str) -> PromptType | None:
|
||||
"""
|
||||
Performs a quick, rule-based classification before involving the LLM.
|
||||
Returns a PromptType if a clear match is found, otherwise None.
|
||||
"""
|
||||
lower_prompt = prompt.lower()
|
||||
|
||||
# IMAGE_GENERATION
|
||||
if any(keyword in lower_prompt for keyword in ["generate image", "create picture", "draw an image", "make a photo", "generate an image", "create an illustration"]):
|
||||
return PromptType.IMAGE_GENERATION
|
||||
|
||||
# DATA_ANALYSIS (often involves uploaded documents)
|
||||
if any(keyword in lower_prompt for keyword in ["read this document", "analyze this file", "summarize this pdf", "extract data from", "index this document", "based on this csv", "from this spreadsheet"]):
|
||||
return PromptType.DATA_ANALYSIS
|
||||
|
||||
# RAG (explicitly asking to search within provided context/documents)
|
||||
# This might overlap with DATA_ANALYSIS, but RAG is more about retrieval from a knowledge base.
|
||||
# The prompt examples for RAG are "What does the uploaded PDF say about quarterly results?"
|
||||
# "Search our documents for the 2023 marketing strategy"
|
||||
if ("uploaded pdf" in lower_prompt or "our documents" in lower_prompt or "this document" in lower_prompt or "the document" in lower_prompt) and \
|
||||
any(keyword in lower_prompt for keyword in ["say about", "search for", "find in", "lookup in"]):
|
||||
return PromptType.RAG
|
||||
|
||||
# SEARCH
|
||||
if any(keyword in lower_prompt for keyword in ["latest news", "current weather", "stock price", "who won", "what is the current", "breaking news", "real-time information", "up-to-date"]):
|
||||
return PromptType.SEARCH
|
||||
|
||||
return None
|
||||
|
||||
async def classify_async(self, prompt: str) -> PromptType:
|
||||
"""Asynchronously classify the prompt"""
|
||||
quick = self._quick_check(prompt)
|
||||
if quick:
|
||||
return quick
|
||||
try:
|
||||
response = await self.chain.ainvoke({"prompt": prompt})
|
||||
return self._parse_response(response.strip())
|
||||
@@ -96,6 +143,9 @@ Return ONLY the label, no explanations.""",
|
||||
|
||||
def classify(self, prompt: str) -> PromptType:
|
||||
"""Synchronously classify the prompt"""
|
||||
quick = self._quick_check(prompt)
|
||||
if quick:
|
||||
return quick
|
||||
try:
|
||||
response = self.chain.invoke({"prompt": prompt})
|
||||
return self._parse_response(response.strip())
|
||||
@@ -105,10 +155,26 @@ Return ONLY the label, no explanations.""",
|
||||
|
||||
def _parse_response(self, response: str) -> PromptType:
|
||||
"""Convert string response to PromptType enum"""
|
||||
response = response.upper()
|
||||
response = response.upper().strip()
|
||||
print(response)
|
||||
|
||||
# Direct match
|
||||
try:
|
||||
return PromptType[response]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
# Handle missing underscores (e.g. GENERALCHAT)
|
||||
normalized_response = response.replace("_", "")
|
||||
for prompt_type in PromptType:
|
||||
if prompt_type.name.replace("_", "") == normalized_response:
|
||||
return prompt_type
|
||||
|
||||
# Substring match as fallback
|
||||
for prompt_type in PromptType:
|
||||
if prompt_type.name in response:
|
||||
return prompt_type
|
||||
|
||||
return PromptType.UNKNOWN
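# Illustrative behaviour of the fallbacks above (assumed inputs, not from this commit):
#   "GENERAL_CHAT"         -> direct Enum lookup           -> PromptType.GENERAL_CHAT
#   "GENERALCHAT"          -> underscore-normalised match  -> PromptType.GENERAL_CHAT
#   "Label: DATA_ANALYSIS" -> substring fallback           -> PromptType.DATA_ANALYSIS
#   "no idea"              -> no match                     -> PromptType.UNKNOWN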
|
||||
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ from channels.generic.websocket import AsyncWebsocketConsumer
|
||||
from langchain_ollama.llms import OllamaLLM
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_core.messages import HumanMessage, AIMessage
|
||||
from langchain.chains import RetrievalQA
|
||||
from langchain_classic.chains import RetrievalQA
|
||||
import re
|
||||
import os
|
||||
from django.conf import settings
|
||||
@@ -73,8 +73,8 @@ from .services.data_analysis_service import AsyncDataAnalysisService
|
||||
|
||||
|
||||
|
||||
from langchain.chains import create_retrieval_chain
|
||||
from langchain.chains.combine_documents import create_stuff_documents_chain
|
||||
from langchain_classic.chains import create_retrieval_chain
|
||||
from langchain_classic.chains.combine_documents import create_stuff_documents_chain
|
||||
from langchain_ollama import ChatOllama
|
||||
|
||||
import logging
|
||||
@@ -724,336 +724,7 @@ llm = OllamaLLM(model=MODEL_NAME)
|
||||
# chain = prompt | llm.with_config({"run_name": "model"}) | output_parser.with_config({"run_name": "Assistant"})
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def create_conversation(prompt, email, title):
|
||||
# return the conversation id
|
||||
conversation = Conversation.objects.create(title=title)
|
||||
conversation.save()
|
||||
|
||||
user = CustomUser.objects.get(email=email)
|
||||
conversation.user_id = user.id
|
||||
conversation.save()
|
||||
return conversation.id
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def get_workspace(conversation_id):
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
return DocumentWorkspace.objects.get(company=conversation.user.company)
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def get_messages(conversation_id, prompt, file_string: str = None, file_type: str = ""):
|
||||
messages = []
|
||||
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
logger.debug(file_string)
|
||||
|
||||
# add the prompt to the conversation
|
||||
serializer = PromptSerializer(
|
||||
data={
|
||||
"message": prompt,
|
||||
"user_created": True,
|
||||
"created": timezone.now(),
|
||||
}
|
||||
)
|
||||
if serializer.is_valid(raise_exception=True):
|
||||
prompt_instance = serializer.save()
|
||||
prompt_instance.conversation_id = conversation.id
|
||||
prompt_instance.save()
|
||||
if file_string:
|
||||
file_name = f"prompt_{prompt_instance.id}_data.{file_type}"
|
||||
f = ContentFile(file_string, name=file_name)
|
||||
prompt_instance.file.save(file_name, f)
|
||||
prompt_instance.file_type = file_type
|
||||
prompt_instance.save()
|
||||
|
||||
for prompt_obj in Prompt.objects.filter(conversation__id=conversation_id):
|
||||
messages.append(
|
||||
{
|
||||
"content": prompt_obj.message,
|
||||
"role": "user" if prompt_obj.user_created else "assistant",
|
||||
"has_file": prompt_obj.file_exists(),
|
||||
"file": prompt_obj.file if prompt_obj.file_exists() else None,
|
||||
"file_type": prompt_obj.file_type if prompt_obj.file_exists() else None,
|
||||
}
|
||||
)
|
||||
|
||||
# now transform the messages
|
||||
transformed_messages = []
|
||||
for message in messages:
|
||||
|
||||
if message["has_file"] and message["file_type"] != None:
|
||||
if "csv" in message["file_type"]:
|
||||
file_type = "csv"
|
||||
altered_message = f"{message['content']}\n The file type is csv and the file contents are: {message['file'].read()}"
|
||||
elif "xlsx" in message["file_type"]:
|
||||
file_type = "xlsx"
|
||||
df = pd.read_excel(message["file"].read())
|
||||
altered_message = f"{message['content']}\n The file type is xlsx and the file contents are: {df}"
|
||||
elif "txt" in message["file_type"]:
|
||||
file_type = "txt"
|
||||
altered_message = f"{message['content']}\n The file type is csv and the file contents are: {message['file'].read()}"
|
||||
else:
|
||||
altered_message = message["content"]
|
||||
|
||||
transformed_message = (
|
||||
AIMessage(content=altered_message)
|
||||
if message["role"] == "assistant"
|
||||
else HumanMessage(content=altered_message)
|
||||
)
|
||||
transformed_messages.append(transformed_message)
|
||||
|
||||
return transformed_messages, prompt_instance
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def save_generated_message(conversation_id, message):
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
|
||||
# add the prompt to the conversation
|
||||
serializer = PromptSerializer(
|
||||
data={
|
||||
"message": message,
|
||||
"user_created": False,
|
||||
"created": timezone.now(),
|
||||
}
|
||||
)
|
||||
if serializer.is_valid():
|
||||
prompt_instance = serializer.save()
|
||||
prompt_instance.conversation_id = conversation.id
|
||||
prompt_instance = serializer.save()
|
||||
else:
|
||||
print(serializer.errors)
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def create_prompt_metric(
|
||||
prompt_id, prompt, has_file, file_type, model_name, conversation_id
|
||||
):
|
||||
prompt_metric = PromptMetric.objects.create(
|
||||
prompt_id=prompt_id,
|
||||
start_time=timezone.now(),
|
||||
prompt_length=len(prompt),
|
||||
has_file=has_file,
|
||||
file_type=file_type,
|
||||
model_name=model_name,
|
||||
conversation_id=conversation_id,
|
||||
)
|
||||
prompt_metric.save()
|
||||
return prompt_metric
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def update_prompt_metric(prompt_metric, status):
|
||||
prompt_metric.event = status
|
||||
prompt_metric.save()
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def finish_prompt_metric(prompt_metric, response_length):
|
||||
logger.info(f"finish_prompt_metric: {response_length}")
|
||||
prompt_metric.end_time = timezone.now()
|
||||
prompt_metric.reponse_length = response_length
|
||||
prompt_metric.event = "FINISHED"
|
||||
prompt_metric.save(update_fields=["end_time", "reponse_length", "event"])
|
||||
logger.info("finish_prompt_metric saved")
|
||||
|
||||
|
||||
@database_sync_to_async
|
||||
def get_retriever(conversation_id):
|
||||
logger.info(f"getting workspace from conversation: {conversation_id}")
|
||||
conversation = Conversation.objects.get(id=conversation_id)
|
||||
logger.info(f"Got conversation: {conversation}")
|
||||
workspace = DocumentWorkspace.objects.get(company=conversation.user.company)
|
||||
logger.info(f"Got workspace: {conversation}")
|
||||
vectorstore = Chroma(
|
||||
persist_directory=f"./chroma_db/",
|
||||
embedding=OllamaEmbeddings(model="llama3.2"),
|
||||
)
|
||||
return vectorstore.as_retriever()
|
||||
|
||||
async def get_conversation_file_async(conversation_id):
|
||||
try:
|
||||
# Get the very first prompt in the conversation that has a file
|
||||
prompt_with_file = await Prompt.objects.filter(
|
||||
conversation_id=conversation_id
|
||||
).exclude(file='').order_by('created').afirst()
|
||||
|
||||
if prompt_with_file and prompt_with_file.file:
|
||||
# You must use sync_to_async to access the file's binary content
|
||||
file_data = await sync_to_async(prompt_with_file.file.read)()
|
||||
file_type = prompt_with_file.file_type
|
||||
return file_data, file_type
|
||||
except Exception as e:
|
||||
logger.error(f"Error retrieving file from conversation history: {e}")
|
||||
return None, None
|
||||
|
||||
PROMPT_CLASSIFIER = PromptClassifier()
|
||||
|
||||
class ChatConsumerAgain(AsyncWebsocketConsumer):
|
||||
async def connect(self):
|
||||
await self.accept()
|
||||
|
||||
async def disconnect(self, close_code):
|
||||
await self.close()
|
||||
|
||||
async def send_json_message(self, data_str):
|
||||
"""
|
||||
Ensures that the message sent over the websocket is a valid JSON object.
|
||||
If data_str is a plain string, it wraps it in {"type": "text", "content": ...}.
|
||||
"""
|
||||
try:
|
||||
# Test if it's already a valid JSON object string
|
||||
json.loads(data_str)
|
||||
# If it is, send it as is
|
||||
await self.send(data_str)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
# If it's a plain string or not JSON-decodable, wrap it
|
||||
await self.send(data_str)
|
||||
|
||||
async def receive(self, text_data=None, bytes_data=None):
|
||||
logger.debug(f"Text Data: {text_data}")
|
||||
logger.debug(f"Bytes Data: {bytes_data}")
|
||||
if text_data:
|
||||
data = json.loads(text_data)
|
||||
message = data.get("message", None)
|
||||
conversation_id = data.get("conversation_id", None)
|
||||
email = data.get("email", None)
|
||||
file = data.get("file", None)
|
||||
file_type = data.get("fileType", "")
|
||||
model = data.get("modelName", "Turbo")
|
||||
|
||||
if not conversation_id:
|
||||
# we need to create a new conversation
|
||||
# we will generate a name for it too
|
||||
title = await title_generator.generate_async(message)
|
||||
conversation_id = await create_conversation(message, email, title)
|
||||
|
||||
if conversation_id:
|
||||
decoded_file = None
|
||||
|
||||
if file:
|
||||
decoded_file = base64.b64decode(file)
|
||||
logger.debug(decoded_file)
|
||||
# The `altered_message` should only be created if a file exists
|
||||
# and you want to pass its content directly to the classifier.
|
||||
# Here, we'll let the classifier decide based on the user's prompt
|
||||
# and then handle the file content separately.
|
||||
altered_message = message
|
||||
if "csv" in file_type:
|
||||
file_type = "csv"
|
||||
#altered_message = f"{message}\n The file type is csv and the file contents are: {decoded_file}"
|
||||
elif "xmlformats-officedocument" in file_type:
|
||||
file_type = "xlsx"
|
||||
#df = pd.read_excel(decoded_file)
|
||||
#altered_message = f"{message}\n The file type is xlsx and the file contents are: {df}"
|
||||
elif "text" in file_type:
|
||||
file_type = "txt"
|
||||
#altered_message = f"{message}\n The file type is txt and the file contents are: {decoded_file}"
|
||||
else:
|
||||
file_type = "Not Sure"
|
||||
|
||||
logger.info(f'received: "{message}" for conversation {conversation_id}')
|
||||
|
||||
# check the moderation here
|
||||
if (
|
||||
await moderation_classifier.classify_async(message)
|
||||
== ModerationLabel.NSFW
|
||||
):
|
||||
response = "Prompt has been marked as NSFW. If this is in error, submit a feedback with the prompt text."
|
||||
response_to_send = json.dumps({"type": "error", "content": response})
|
||||
|
||||
logger.warning("this prompt has been marked as NSFW")
|
||||
await self.send("CONVERSATION_ID")
|
||||
await self.send(str(conversation_id))
|
||||
await self.send("START_OF_THE_STREAM_ENDER_GAME_42")
|
||||
await self.send_json_message(response_to_send)
|
||||
await self.send("END_OF_THE_STREAM_ENDER_GAME_42")
|
||||
await save_generated_message(conversation_id, response_to_send)
|
||||
return
|
||||
|
||||
# TODO: add the message to the database
|
||||
# get the new conversation
|
||||
# TODO: get the messages here
|
||||
|
||||
messages, prompt = await get_messages(
|
||||
conversation_id, message, decoded_file, file_type
|
||||
)
|
||||
if not decoded_file:
|
||||
decoded_file, file_type = await get_conversation_file_async(conversation_id)
|
||||
|
||||
prompt_type = await PROMPT_CLASSIFIER.classify_async(message)
|
||||
logger.info(f"prompt_type: {prompt_type} for {message}")
|
||||
print(f"prompt_type: {prompt_type} for {message}")
|
||||
# Check for a file AND the new DATA_ANALYSIS type
|
||||
# The classifier might not correctly identify a data analysis prompt
|
||||
# without the file contents. So, we'll add a check to override.
|
||||
if decoded_file and (prompt_type == PromptType.DATA_ANALYSIS or 'analyze' in message.lower() or 'data' in message.lower()):
|
||||
prompt_type = PromptType.DATA_ANALYSIS
|
||||
elif decoded_file:
|
||||
# If a decoded_file is uploaded but the query is general, default to GENERAL_CHAT
|
||||
prompt_type = PromptType.GENERAL_CHAT
|
||||
|
||||
prompt_metric = await create_prompt_metric(
|
||||
prompt.id,
|
||||
prompt.message,
|
||||
True if file else False,
|
||||
file_type,
|
||||
MODEL_NAME,
|
||||
conversation_id,
|
||||
)
|
||||
if file:
|
||||
# update with the altered_message
|
||||
messages = messages[:-1] + [HumanMessage(content=altered_message)]
|
||||
logger.info(messages)
|
||||
# send it to the LLM
|
||||
|
||||
# stream the response back
|
||||
response = ""
|
||||
# start of the message
|
||||
await self.send("CONVERSATION_ID")
|
||||
await self.send(str(conversation_id))
|
||||
await self.send("START_OF_THE_STREAM_ENDER_GAME_42")
|
||||
if prompt_type == PromptType.RAG:
|
||||
service = AsyncRAGService()
|
||||
# await service.ingest_documents()
|
||||
workspace = await get_workspace(conversation_id)
|
||||
logger.info("Time to get the rag response")
|
||||
|
||||
async for chunk in service.generate_response(
|
||||
messages, prompt.message, workspace
|
||||
):
|
||||
response += chunk
|
||||
await self.send_json_message(chunk)
|
||||
elif prompt_type == PromptType.DATA_ANALYSIS:
|
||||
service = AsyncDataAnalysisService()
|
||||
if not decoded_file:
|
||||
await self.send_json_message("Please upload a file to perform data analysis.")
|
||||
else:
|
||||
async for chunk in service.generate_response(prompt.message, decoded_file, file_type):
|
||||
response += chunk
|
||||
await self.send_json_message(chunk)
|
||||
elif prompt_type == PromptType.IMAGE_GENERATION:
|
||||
response = "Image Generation is not supported at this time, but it will be soon."
|
||||
await self.send_json_message(response)
|
||||
|
||||
else:
|
||||
logger.info(f"using the AsyncLLMService\n\n{messages}\n{prompt.message}")
|
||||
service = AsyncLLMService()
|
||||
async for chunk in service.generate_response(
|
||||
messages, prompt.message, conversation_id
|
||||
):
|
||||
response += chunk
|
||||
await self.send_json_message(chunk)
|
||||
await self.send("END_OF_THE_STREAM_ENDER_GAME_42")
|
||||
|
||||
await save_generated_message(conversation_id, response)
|
||||
|
||||
await finish_prompt_metric(prompt_metric, len(response))
|
||||
|
||||
if bytes_data:
|
||||
logger.info("we have byte data")
|
||||
|
||||
|
||||
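# The stream above is framed by sentinel text frames: "CONVERSATION_ID" followed by the
# id, then chunks between "START_OF_THE_STREAM_ENDER_GAME_42" and
# "END_OF_THE_STREAM_ENDER_GAME_42". A minimal client-side sketch of consuming that
# framing follows; the route and request payload shape are assumptions (they are not
# part of this hunk), and chunk frames are treated as opaque text since the exact
# output of send_json_message is not shown here.
#
# import asyncio
# import json
# import websockets  # third-party "websockets" package, assumed installed
#
# async def consume(prompt: str) -> str:
#     uri = "ws://localhost:8000/ws/chat/"  # hypothetical route
#     async with websockets.connect(uri) as ws:
#         await ws.send(json.dumps({"message": prompt}))  # payload shape is an assumption
#         response, streaming, expecting_id = "", False, False
#         conversation_id = None
#         async for frame in ws:
#             if frame == "CONVERSATION_ID":
#                 expecting_id = True
#             elif expecting_id:
#                 conversation_id = frame
#                 expecting_id = False
#             elif frame == "START_OF_THE_STREAM_ENDER_GAME_42":
#                 streaming = True
#             elif frame == "END_OF_THE_STREAM_ENDER_GAME_42":
#                 break
#             elif streaming:
#                 response += frame  # one streamed chunk per frame
#         return response
#
# # asyncio.run(consume("Summarize the attached CSV"))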
# Document Views

@@ -208,3 +208,52 @@ EMAIL_USE_TLS = True

# Captcha
CAPTCHA_SECRET_KEY = "6LfENu4qAAAAABdrj6JTviq-LfdPP5imhE-Os7h9"

directory_path = 'logs'
# LOGGING = {
#     'version': 1,
#     'disable_existing_loggers': False,
#     'formatters': {
#         'verbose': {
#             'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',
#             'style': '{',
#         },
#         'simple': {
#             'format': '{levelname} {message}',
#             'style': '{',
#         },
#     },
#     'handlers': {
#         'console': {
#             'level': 'INFO',
#             'class': 'logging.StreamHandler',
#             'formatter': 'simple',
#         },
#         'file': {
#             'level': 'DEBUG',
#             'class': 'logging.handlers.RotatingFileHandler',
#             'filename': f'{directory_path}/django.log',
#             'maxBytes': 1024 * 1024 * 5,  # 5 MB
#             'backupCount': 5,
#             'formatter': 'verbose',
#         },
#     },
#     'loggers': {
#         'django': {
#             'handlers': ['console', 'file'],
#             'level': 'INFO',
#             'propagate': True,
#         },
#         'my_app': {
#             'handlers': ['console', 'file'],
#             'level': 'DEBUG',
#             'propagate': False,
#         },
#     },
# }

os.makedirs(directory_path, exist_ok=True)

# Feature Flags
ALLOW_IMAGE_GENERATION = False
ALLOW_INTERNET_ACCESS = True

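# ALLOW_IMAGE_GENERATION and ALLOW_INTERNET_ACCESS are plain module-level settings.
# A minimal sketch of how the IMAGE_GENERATION branch in consumers.py could consult the
# flag; this wiring is hypothetical and not part of this commit, and
# image_generation_service does not exist in this diff.
#
# from django.conf import settings
#
# if prompt_type == PromptType.IMAGE_GENERATION:
#     if getattr(settings, "ALLOW_IMAGE_GENERATION", False):
#         response = await image_generation_service.generate(prompt.message)  # hypothetical
#     else:
#         response = "Image Generation is not supported at this time, but it will be soon."
#     await self.send_json_message(response)
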
371
requirements.txt
@@ -1,234 +1,265 @@
aiofiles==24.1.0
accelerate==1.12.0
aiofiles==25.1.0
aiohappyeyeballs==2.6.1
aiohttp==3.11.18
aiosignal==1.3.2
aiohttp==3.13.2
aiosignal==1.4.0
annotated-doc==0.0.4
annotated-types==0.7.0
antlr4-python3-runtime==4.9.3
anyio==4.8.0
asgiref==3.8.1
antlr4-python3-runtime==4.13.2
anyio==4.12.0
asgiref==3.11.0
astor==0.8.1
attrs==25.1.0
autobahn==24.4.2
Automat==24.8.1
attrs==25.4.0
autobahn==25.11.1
Automat==25.4.16
backoff==2.2.1
bcrypt==4.3.0
beautifulsoup4==4.13.4
black==25.1.0
build==1.2.2.post1
bcrypt==5.0.0
beautifulsoup4==4.14.3
black==25.11.0
brotli==1.2.0
build==1.3.0
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
channels==4.2.0
cbor2==5.7.1
certifi==2025.11.12
cffi==2.0.0
channels==4.3.2
chardet==5.2.0
charset-normalizer==3.4.1
charset-normalizer==3.4.4
chroma-hnswlib==0.7.6
chromadb==1.0.7
click==8.1.8
chromadb==1.3.5
click==8.3.1
coloredlogs==15.0.1
constantly==23.10.4
contourpy==1.3.1
cryptography==44.0.2
contourpy==1.3.3
cryptography==46.0.3
cycler==0.12.1
daphne==4.1.2
daphne==4.2.1
dataclasses-json==0.6.7
Deprecated==1.2.18
ddgs==9.9.3
Deprecated==1.3.1
distro==1.9.0
Django==5.1.7
Django==6.0
django-autoslug==1.9.9
django-cors-headers==4.7.0
django-filter==25.1
djangorestframework==3.15.2
djangorestframework_simplejwt==5.5.0
duckdb==1.2.1
durationpy==0.9
django-cors-headers==4.9.0
django-filter==25.2
djangorestframework==3.16.1
djangorestframework_simplejwt==5.5.1
duckdb==1.4.2
durationpy==0.10
effdet==0.4.1
emoji==2.14.1
emoji==2.15.0
et_xmlfile==2.0.0
eval_type_backport==0.2.2
Faker==37.0.0
fastapi==0.115.9
filelock==3.17.0
eval_type_backport==0.3.1
fake-useragent==2.2.0
Faker==38.2.0
fastapi==0.124.0
filelock==3.20.0
filetype==1.2.0
flatbuffers==25.2.10
fonttools==4.56.0
frozenlist==1.6.0
fsspec==2025.2.0
google-api-core==2.24.2
google-auth==2.39.0
google-cloud-vision==3.10.1
googleapis-common-protos==1.70.0
greenlet==3.1.1
grpcio==1.72.0rc1
grpcio-status==1.72.0rc1
h11==0.14.0
flatbuffers==25.9.23
fonttools==4.61.0
frozenlist==1.8.0
fsspec==2025.12.0
google-api-core==2.28.1
google-auth==2.43.0
google-cloud-vision==3.11.0
googleapis-common-protos==1.72.0
greenlet==3.3.0
grpcio==1.76.0
grpcio-status==1.76.0
h11==0.16.0
h2==4.3.0
hf-xet==1.2.0
hpack==4.1.0
html5lib==1.1
httpcore==1.0.7
httptools==0.6.4
httpcore==1.0.9
httptools==0.7.1
httpx==0.28.1
httpx-sse==0.4.0
huggingface-hub==0.30.2
httpx-sse==0.4.3
huggingface-hub==0.36.0
humanfriendly==10.0
hyperframe==6.1.0
hyperlink==21.0.0
idna==3.10
importlib_metadata==8.6.1
idna==3.11
importlib_metadata==8.7.0
importlib_resources==6.5.2
incremental==24.7.2
Incremental==24.11.0
Jinja2==3.1.6
jiter==0.8.2
joblib==1.4.2
jiter==0.12.0
joblib==1.5.2
jsonpatch==1.33
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2025.4.1
kiwisolver==1.4.8
kubernetes==32.0.1
langchain==0.3.24
langchain-community==0.3.23
langchain-core==0.3.56
langchain-ollama==0.2.3
langchain-text-splitters==0.3.8
jsonschema==4.25.1
jsonschema-specifications==2025.9.1
kiwisolver==1.4.9
kubernetes==34.1.0
langchain==1.1.2
langchain-chroma==1.0.0
langchain-classic==1.0.0
langchain-community==0.4.1
langchain-core==1.1.1
langchain-ollama==1.0.0
langchain-text-splitters==1.0.0
langdetect==1.0.9
langsmith==0.3.13
lxml==5.4.0
Markdown==3.7
markdown-it-py==3.0.0
MarkupSafe==3.0.2
langgraph==1.0.4
langgraph-checkpoint==3.0.1
langgraph-prebuilt==1.0.5
langgraph-sdk==0.2.14
langsmith==0.4.56
lxml==6.0.2
Markdown==3.10
markdown-it-py==4.0.0
MarkupSafe==3.0.3
marshmallow==3.26.1
matplotlib==3.10.1
matplotlib==3.10.7
mdurl==0.1.2
mmh3==5.1.0
ml_dtypes==0.5.4
mmh3==5.2.0
mpmath==1.3.0
multidict==6.4.3
mypy-extensions==1.0.0
msgpack==1.1.2
multidict==6.7.0
mypy_extensions==1.1.0
nest-asyncio==1.6.0
networkx==3.4.2
nltk==3.9.1
numpy==2.2.3
nvidia-cublas-cu12==12.6.4.1
nvidia-cuda-cupti-cu12==12.6.80
nvidia-cuda-nvrtc-cu12==12.6.77
nvidia-cuda-runtime-cu12==12.6.77
nvidia-cudnn-cu12==9.5.1.17
nvidia-cufft-cu12==11.3.0.4
nvidia-cufile-cu12==1.11.1.6
nvidia-curand-cu12==10.3.7.77
nvidia-cusolver-cu12==11.7.1.2
nvidia-cusparse-cu12==12.5.4.2
nvidia-cusparselt-cu12==0.6.3
nvidia-nccl-cu12==2.26.2
nvidia-nvjitlink-cu12==12.6.85
nvidia-nvtx-cu12==12.6.77
oauthlib==3.2.2
networkx==3.6
nltk==3.9.2
numpy==2.2.6
nvidia-cublas-cu12==12.8.4.1
nvidia-cuda-cupti-cu12==12.8.90
nvidia-cuda-nvrtc-cu12==12.8.93
nvidia-cuda-runtime-cu12==12.8.90
nvidia-cudnn-cu12==9.10.2.21
nvidia-cufft-cu12==11.3.3.83
nvidia-cufile-cu12==1.13.1.3
nvidia-curand-cu12==10.3.9.90
nvidia-cusolver-cu12==11.7.3.90
nvidia-cusparse-cu12==12.5.8.93
nvidia-cusparselt-cu12==0.7.1
nvidia-nccl-cu12==2.27.5
nvidia-nvjitlink-cu12==12.8.93
nvidia-nvshmem-cu12==3.3.20
nvidia-nvtx-cu12==12.8.90
oauthlib==3.3.1
olefile==0.47
ollama==0.4.7
ollama==0.6.1
omegaconf==2.3.0
onnx==1.18.0
onnxruntime==1.21.1
openai==1.65.4
opencv-python==4.11.0.86
onnx==1.20.0
onnxruntime==1.23.2
openai==2.9.0
opencv-python==4.12.0.88
openpyxl==3.1.5
opentelemetry-api==1.32.1
opentelemetry-exporter-otlp-proto-common==1.32.1
opentelemetry-exporter-otlp-proto-grpc==1.32.1
opentelemetry-api==1.39.0
opentelemetry-exporter-otlp-proto-common==1.39.0
opentelemetry-exporter-otlp-proto-grpc==1.39.0
opentelemetry-instrumentation==0.53b1
opentelemetry-instrumentation-asgi==0.53b1
opentelemetry-instrumentation-fastapi==0.53b1
opentelemetry-proto==1.32.1
opentelemetry-sdk==1.32.1
opentelemetry-semantic-conventions==0.53b1
opentelemetry-proto==1.39.0
opentelemetry-sdk==1.39.0
opentelemetry-semantic-conventions==0.60b0
opentelemetry-util-http==0.53b1
orjson==3.10.15
orjson==3.11.5
ormsgpack==1.12.0
overrides==7.7.0
packaging==24.2
pandas==2.2.3
packaging==25.0
pandas==2.3.3
pandasai==2.4.2
parameterized==0.9.0
pathspec==0.12.1
pdf2image==1.17.0
pdfminer.six==20250506
pi_heif==0.22.0
pikepdf==9.7.0
pillow==11.1.0
platformdirs==4.3.6
posthog==4.0.1
propcache==0.3.1
pdfminer.six==20251107
pi==0.1.2
pi_heif==1.1.1
pikepdf==10.0.2
pillow==12.0.0
platformdirs==4.5.1
posthog==5.4.0
primp==0.15.0
propcache==0.4.1
proto-plus==1.26.1
protobuf==6.31.0rc2
psutil==7.0.0
protobuf==6.33.2
psutil==7.1.3
py-ubjson==0.16.1
pyasn1==0.6.1
pyasn1_modules==0.4.1
pycocotools==2.0.8
pycparser==2.22
pydantic==2.11.4
pydantic-settings==2.9.1
pydantic_core==2.33.2
Pygments==2.19.1
pyasn1_modules==0.4.2
pybase64==1.4.3
pycocotools==2.0.10
pycparser==2.23
pydantic==2.12.5
pydantic-settings==2.12.0
pydantic_core==2.41.5
Pygments==2.19.2
PyJWT==2.10.1
pyOpenSSL==25.0.0
pyparsing==3.2.1
pypdf==5.4.0
pypdfium2==4.30.1
pyOpenSSL==25.3.0
pyparsing==3.2.5
pypdf==6.4.0
pypdfium2==5.1.0
PyPika==0.48.9
pyproject_hooks==1.2.0
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
python-iso639==2025.2.18
python-docx==1.2.0
python-dotenv==1.2.1
python-iso639==2025.11.16
python-magic==0.4.27
python-multipart==0.0.20
python-oxmsg==0.0.2
pytz==2025.1
PyYAML==6.0.2
RapidFuzz==3.13.0
referencing==0.36.2
regex==2024.11.6
requests==2.32.3
pytokens==0.3.0
pytz==2025.2
PyYAML==6.0.3
RapidFuzz==3.14.3
referencing==0.37.0
regex==2025.11.3
requests==2.32.5
requests-oauthlib==2.0.0
requests-toolbelt==1.0.0
rich==14.0.0
rpds-py==0.24.0
rich==14.2.0
rpds-py==0.30.0
rsa==4.9.1
safetensors==0.5.3
scipy==1.15.2
safetensors==0.7.0
scipy==1.16.3
service-identity==24.2.0
setuptools==75.8.2
setuptools==80.9.0
shellingham==1.5.4
six==1.17.0
sniffio==1.3.1
soupsieve==2.7
SQLAlchemy==2.0.38
sqlglot==26.9.0
sqlglotrs==0.4.0
sqlparse==0.5.3
starlette==0.45.3
socksio==1.0.0
soupsieve==2.8
SQLAlchemy==2.0.44
sqlglot==28.1.0
sqlglotrs==0.8.0
sqlparse==0.5.4
starlette==0.50.0
sympy==1.14.0
tenacity==9.0.0
timm==1.0.15
tokenizers==0.21.1
torch==2.7.0
torchvision==0.22.0
tenacity==9.1.2
timm==1.0.22
tokenizers==0.22.1
torch==2.9.1
torchvision==0.24.1
tqdm==4.67.1
transformers==4.51.3
triton==3.3.0
Twisted==24.11.0
txaio==23.1.1
typer==0.15.3
transformers==4.57.3
triton==3.5.1
Twisted==25.5.0
txaio==25.12.1
typer==0.20.0
typer-slim==0.20.0
typing-inspect==0.9.0
typing-inspection==0.4.0
typing_extensions==4.12.2
tzdata==2025.1
unstructured==0.17.2
unstructured-client==0.34.0
unstructured-inference==0.8.10
typing-inspection==0.4.2
typing_extensions==4.15.0
tzdata==2025.2
ujson==5.11.0
unstructured==0.18.21
unstructured-client==0.42.4
unstructured.pytesseract==0.3.15
unstructured_inference==1.1.2
urllib3==2.3.0
uvicorn==0.34.2
uvloop==0.21.0
watchfiles==1.0.5
uuid_utils==0.12.0
uvicorn==0.38.0
uvloop==0.22.1
watchfiles==1.1.1
webencodings==0.5.1
websocket-client==1.8.0
websocket-client==1.9.0
websockets==15.0.1
wrapt==1.17.2
yarl==1.20.0
zipp==3.21.0
zope.interface==7.2
zstandard==0.23.0
wrapt==2.0.1
xxhash==3.6.0
yarl==1.22.0
zipp==3.23.0
zope.interface==8.1.1
zstandard==0.25.0