Commit 060f7928 authored by Sangamithra Panneer Selvam

Metrics Calculation script

parent 7ae1a071
@@ -32,12 +32,8 @@ message LLMQuery {

 message LLMAnswer {
     string text = 1;
-<<<<<<< HEAD
     ConvoID id = 2;
     string relevant_context = 3;
-=======
-    ConvoID id = 2;
->>>>>>> 509e561080ec6ceab6019fc084096e7c8ccb2503
 }

 message Status {
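With the conflict resolved in favor of HEAD, LLMAnswer now carries the conversation ID and the retrieved context alongside the generated text. A minimal sketch of building one of these messages from the generated Python bindings (assuming the service compiles to llms_pb2, as the server code below does; the ConvoID field types are not shown in this hunk, so the q_id value is an assumption):

import llms_pb2

# Hypothetical values; q_id being a string is an assumption this hunk
# does not confirm.
answer = llms_pb2.LLMAnswer(
    text="generated answer",
    id=llms_pb2.ConvoID(q_id="q-001"),
    relevant_context="passage retrieved from the vector store",
)
print(answer.relevant_context)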
@@ -28,7 +28,6 @@ def faiss_db():

 def format_docs(docs):
     return "\n\n".join(doc.page_content for doc in docs)

-<<<<<<< HEAD
 def get_context(retriever, question):
     docs = retriever.get_relevant_documents(question)
@@ -36,8 +35,6 @@ def get_context(retriever, question):
     context = "\n".join([doc.page_content for doc in docs])
     return context
-=======
->>>>>>> 509e561080ec6ceab6019fc084096e7c8ccb2503

 def rag_chain(retriever, system_instruction, llm_service, question):
     prompt_template_generic = """
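The get_context helper kept from HEAD fetches the documents the retriever considers relevant and joins their text, which is exactly the grounding a metrics script needs to score answers against. A sketch of how it might be exercised, assuming a LangChain FAISS retriever like the one faiss_db() above presumably returns (the embedding model and sample text are illustrative):

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Illustrative index; the real code builds its retriever in faiss_db().
db = FAISS.from_texts(["Paris is the capital of France."], HuggingFaceEmbeddings())
retriever = db.as_retriever()

print(get_context(retriever, "What is the capital of France?"))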
@@ -67,11 +64,7 @@ def rag_chain(retriever, system_instruction, llm_service, question):

     # Invoke the chain with the provided question
     rag_ans = rag_chain.invoke(question)
-<<<<<<< HEAD

     # Get relevant context
     relevant_context = get_context(retriever, question)
     return rag_ans, relevant_context
-=======
-    return rag_ans
->>>>>>> 509e561080ec6ceab6019fc084096e7c8ccb2503
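Keeping the HEAD branch changes rag_chain's contract: it now returns an (answer, context) pair instead of just the answer, so callers can log or score the grounding context. Call sites would unpack it along these lines (the argument names are the function's own parameters; concrete values come from the surrounding application, which this diff does not show):

# Hypothetical call site.
rag_ans, relevant_context = rag_chain(retriever, system_instruction, llm_service, question)
print("answer:", rag_ans)
print("grounded on:", relevant_context)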
@@ -7,7 +7,6 @@ import llms_pb2

 from app import app_run
 from apps.application_manager import LLMApplicationManager

-<<<<<<< HEAD
 class LLMServiceServicer(llms_pb2_grpc.LLMServiceServicer):
     def __init__(self, app_type="invoke_direct_llm"):
         self.application = LLMApplicationManager.get_application(app_type)  # Returns a class
@@ -24,24 +23,6 @@ class LLMServiceServicer(llms_pb2_grpc.LLMServiceServicer):
         )

-=======
-class LLMServiceServicer(llms_pb2_grpc.LLMServiceServicer):
-    def __init__(self, app_type="invoke_direct_llm"):
-        self.application = LLMApplicationManager.get_application(app_type)
-
-    def instruct_llm_stream(self, request_iterator, context):
-        for llm_query in request_iterator:
-            response_text = self.application.process_request(llm_query)
-            print(
-                f"Processed: {llm_query.input.prompt} with question: {llm_query.qa.question}"
-            )
-            yield llms_pb2.LLMAnswer(
-                id=llms_pb2.ConvoID(q_id=llm_query.id.q_id), text=response_text
-            )
->>>>>>> 509e561080ec6ceab6019fc084096e7c8ccb2503

 def serve(port, app_type="invoke_direct_llm"):
     server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
     llms_pb2_grpc.add_LLMServiceServicer_to_server(LLMServiceServicer(app_type), server)
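This resolution keeps the HEAD servicer, whose instruct_llm_stream body is truncated in the hunk above; the removed duplicate shows the same streaming shape. A minimal client sketch, under the assumptions that the generated stub is named LLMServiceStub, the RPC is exposed as instruct_llm_stream, and LLMQuery has the input.prompt, qa.question and id.q_id fields referenced by the removed print statement (none of which this diff's proto hunk confirms):

import grpc
import llms_pb2
import llms_pb2_grpc

def run(port=50051):
    # Build one query; nested fields are set by attribute access, so the
    # nested message type names need not be known here. q_id being a
    # string is an assumption.
    query = llms_pb2.LLMQuery()
    query.input.prompt = "You are a helpful assistant."
    query.qa.question = "What is the capital of France?"
    query.id.q_id = "q-001"

    with grpc.insecure_channel(f"localhost:{port}") as channel:
        stub = llms_pb2_grpc.LLMServiceStub(channel)
        # The RPC takes a stream of queries and yields a stream of answers.
        for answer in stub.instruct_llm_stream(iter([query])):
            print(answer.id.q_id, answer.text, answer.relevant_context)

if __name__ == "__main__":
    run()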