Commit 6d179e64 authored by Swetha Lakshmana Murthy

Edit rag_chain.py.j2

parent c3cb917e
import os
from base64 import b64encode
from collections import defaultdict
from typing import List

from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from openai import OpenAI
# FAISS folder path
local_directory = os.getenv("SHARED_FOLDER_PATH")
@@ -17,6 +13,8 @@ faiss_folder = os.path.join(local_directory, "faiss_index")

# History management
store = {}

def faiss_db():
    embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-base-en-v1.5")
    try:
@@ -42,73 +40,100 @@ def get_context(retriever, question):
    context = "\n".join([doc.page_content for doc in docs])
    return context
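# The body of faiss_db() above is truncated in this diff view. As a rough,
# hypothetical sketch only (the flags and k value below are assumptions, not
# the committed code), loading the persisted index could look like:
#
#     db = FAISS.load_local(
#         faiss_folder,
#         embeddings,
#         allow_dangerous_deserialization=True,  # FAISS metadata is pickled
#     )
#     return db.as_retriever(search_kwargs={"k": 4})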
def get_session_history(session_id: str) -> List[str]:
    """
    Retrieve or initialize the chat history for a given session.
    """
    global store
    if session_id not in store:
        store[session_id] = []
    return store[session_id]
def update_session_history(session_id: str, user_input: str, assistant_response: str):
    """
    Append the user and assistant interaction to the session's chat history.
    """
    history = get_session_history(session_id)
    history.append(f"User: {user_input}")
    history.append(f"Assistant: {assistant_response}")
def build_prompt_with_history(session_id: str, question: str, context: str, custom_prompt: str, model_name: str) -> str:
    """
    Build a prompt that includes the system prompt, context, and chat history.
    """
    history = get_session_history(session_id)
    history_text = "\n".join(history)
    default_string = "System instruction here"
    if custom_prompt.lower() == default_string.lower() and "opengpt" in model_name.lower():
        # Default German-language prompt for OpenGPT-style models; an English
        # variant is kept below for reference.
        system_prompt = f"""
        System: Ein Gespräch zwischen einem Menschen und einem Assistenten mit künstlicher Intelligenz. Der Assistent gibt hilfreiche und höfliche Antworten auf die Fragen des Menschen.
        Konversation_Geschichte: {history_text}
        Kontext: {context}
        Beantworte die folgende Frage ausschließlich auf Basis des oben gegebenen Kontextes und der Konversationsgeschichte. Beziehe dich in der abschließenden Antwort nicht auf interne Prozesse und Gedankenketten.
        Frage: {question}
        Assistant:
        """
        # system_prompt = f"""
        # System: A chat between a human and an artificial intelligence assistant. The assistant gives helpful and polite answers to the user's questions and must provide answers concisely in English.
        # previous_conversations: {history_text}
        # Context: {context}
        # Answer the following user's question based solely on the context given above. Do not reference any internal processing, chain-of-thought, or previous conversation details in the final answer.
        # User: {question}
        # Assistant:
        # """
    elif custom_prompt.lower() == default_string.lower():
        system_prompt = f"""
        You are an assistant for answering questions.
        You are given the extracted parts of a long document and a question. Provide a conversational answer.
        If you don't know the answer, just say "I do not know." Don't make up an answer.
        Previous_conversation: {history_text}
        Context: {context}
        Answer the following question based on the retrieved context. Use the previous conversation if the user asks a follow-up question.
        Question: {question}
        Assistant:
        """
    else:
        # format_map over a defaultdict so a custom prompt that omits one of the
        # placeholders does not raise KeyError
        system_prompt = custom_prompt.format_map(
            defaultdict(str, {"history_text": history_text, "question": question, "context": context})
        )
    return system_prompt
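# The custom_prompt branch above treats the prompt as a str.format-style
# template. A hypothetical example (placeholder names must match exactly):
#
#     my_prompt = (
#         "You are a terse QA bot.\n"
#         "History: {history_text}\n"
#         "Context: {context}\n"
#         "Question: {question}\n"
#         "Answer:"
#     )
#     build_prompt_with_history("session_123", "What is RAG?",
#                               "RAG pairs retrieval with generation.",
#                               my_prompt, "some-model")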
def rag_chain(retriever, llm_service, question, custom_prompt):
    relevant_context = get_context(retriever, question)
    # Build the prompt with history
    session_id = "session_123"
    qa_prompt = build_prompt_with_history(
        session_id, question, relevant_context, custom_prompt, llm_service.model_name
    )
    print("prompt: ", qa_prompt)
    print("LLM received:", llm_service.model_name)
    if "opengpt" in llm_service.model_name.lower():
        # Chat-style models expect a list of messages
        rag_ans = llm_service.invoke([HumanMessage(content=qa_prompt)])
    else:
        rag_ans = llm_service.invoke(qa_prompt)
    answer = rag_ans.content
    print("rag answer: ", answer)
    # Store the plain answer text in the history, not the message object
    update_session_history(session_id, question, answer)
    return answer, relevant_context
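# Hypothetical end-to-end wiring (llm_service stands in for any LangChain chat
# model exposing .invoke() and .model_name; the names here are assumptions):
#
#     retriever = faiss_db()
#     answer, context = rag_chain(
#         retriever,
#         llm_service,
#         "What does the document say about deployment?",
#         "System instruction here",  # sentinel -> built-in default prompt
#     )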