diff --git a/User_Feedback_Diagnostics/server.py b/User_Feedback_Diagnostics/server.py
index 590cde275baa285c08c1a3c26cdbf67d16b1ce38..3f37ea59bb8ce21a85d12c154350a54bde19a2cd 100644
--- a/User_Feedback_Diagnostics/server.py
+++ b/User_Feedback_Diagnostics/server.py
@@ -4,6 +4,7 @@ import threading
 import os
 import json
 from datetime import datetime
+import time
 
 import user_ratings_pb2
 import user_ratings_pb2_grpc
@@ -11,6 +12,8 @@ import user_ratings_pb2_grpc
 from app import app_run
 from utils_calculate_metrics import process_ratings_file
 import logging
+
+
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
 
@@ -20,6 +23,17 @@ file_path = os.path.join(local_directory, 'ratings.json')
 #file_path = "ratings.json"
 
 
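+# Background worker: periodically recomputes the aggregate metrics.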
+def run_periodically():
+    print("[run_periodically] Starting periodic task...")
+    class_object = RatingsService()
+    while True:
+        print("[run_periodically] Calling CalculateMetrics")
+        calculated_metrics = class_object._calculate_metrics_internal()
+        #class_object._get_metrics_internal(calculated_metrics)
+        time.sleep(2)  # Wait 2 seconds between recalculations
+
+
 class RatingsService(user_ratings_pb2_grpc.RatingsServiceServicer):
     def __init__(self):
         self.has_metrics = True # Flag to indicate the presence of metrics in this node and print a message accordingly.
@@ -45,6 +59,8 @@ class RatingsService(user_ratings_pb2_grpc.RatingsServiceServicer):
                             timestamp=value
                         elif key.lower() == "feedback" :
                             feedback=value
+                        elif key.lower() == "prompt" :
+                            prompt=value
                         elif "star" in key.lower() or "rating" in key.lower():
                             stars = value
                         elif isinstance(value,(float,int)):
@@ -52,23 +68,23 @@ class RatingsService(user_ratings_pb2_grpc.RatingsServiceServicer):
 
                     print("llm_metrics: ",llm_metrics)
 
-                    rating = user_ratings_pb2.Rating(filename=filename, stars=stars, feedback=feedback, timestamp=timestamp, llm_metrics=llm_metrics)
+                    rating = user_ratings_pb2.Rating(filename=filename, stars=stars, feedback=feedback, prompt=prompt, timestamp=timestamp, llm_metrics=llm_metrics)
                     yield rating
         except:
             rating = user_ratings_pb2.Rating(filename='', stars=0.0, feedback='', timestamp='', llm_metrics={})
             yield rating
-           
-
-
-
-    def CalculateMetrics(self, request, context):   # Perform calculation for average metrics
+
+
+    def _calculate_metrics_internal(self):  # Shared by the CalculateMetrics RPC and the background loop
 
-        if file_path:
-            score_averages, average_rating, average_sentiment_score = process_ratings_file(file_path)
+        if os.path.exists(file_path):
+            score_averages, average_rating, average_sentiment_score, current_prompt = process_ratings_file(file_path)
             
             calculated_metrics = user_ratings_pb2.CalculatedMetrics()
             calculated_metrics.type = "LLM Metrics"
             calculated_metrics.status_text = "success"
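+            # Attach the prompt captured from the ratings file to the response.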
+            calculated_metrics.prompt = current_prompt
             calculated_metrics.more_is_better.overall_avg_feedback_sentiment_score = average_sentiment_score
             calculated_metrics.more_is_better.overall_avg_star_rating = average_rating
             for key,value in score_averages.items():
@@ -79,8 +95,9 @@ class RatingsService(user_ratings_pb2_grpc.RatingsServiceServicer):
         else:
             return ()
 
+
     def get_metrics_metadata(self, request, context):  # Get excution metadata
-        
+
         final_metrics_dict = {'metrics': {}}
         now = datetime.now()
         current_time = now.strftime("%Y-%m-%d %H:%M:%S")
@@ -99,13 +116,20 @@ class RatingsService(user_ratings_pb2_grpc.RatingsServiceServicer):
         print(final_metrics_dict)
 
 
+    def CalculateMetrics(self, request, context):   # Perform calculation for average metrics
+        calculated_metrics = self._calculate_metrics_internal()
+        return calculated_metrics
+
+
 def serve(port):
     server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
     user_ratings_pb2_grpc.add_RatingsServiceServicer_to_server(RatingsService(), server)
     server.add_insecure_port("[::]:{}".format(port))
     print("Starting server. Listening on port : " + str(port))
     server.start()
-    threading.Thread(target=app_run()).start()
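+    # daemon=True lets these helper threads exit when the main server stops.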
+    threading.Thread(target=app_run, daemon=True).start()
+    threading.Thread(target=run_periodically, daemon=True).start()
     server.wait_for_termination()
 
 if __name__ == "__main__":
diff --git a/User_Feedback_Diagnostics/user_ratings.proto b/User_Feedback_Diagnostics/user_ratings.proto
index 75e923625f6a82beafe6d0de4cb6cafbce32dedb..5729595aa6077c48a33c6810b7c1e31a99cb8c3c 100644
--- a/User_Feedback_Diagnostics/user_ratings.proto
+++ b/User_Feedback_Diagnostics/user_ratings.proto
@@ -9,7 +9,8 @@ message Rating {
     float stars = 2;
     string feedback = 3; 
     string timestamp = 4;
-    map<string, double> llm_metrics = 5 ;
+    string prompt = 5;                   // prompt text associated with this rating
+    map<string, double> llm_metrics = 6;
 }
 
 
@@ -23,6 +24,7 @@ message CalculatedMetrics {
 
     string type = 1;
     string status_text = 2;
+    string prompt = 3;
     Moreisbetter more_is_better = 4;
 }
 
diff --git a/User_Feedback_Diagnostics/utils_calculate_metrics.py b/User_Feedback_Diagnostics/utils_calculate_metrics.py
index 1135a965608d0fa5f75c9f8f30fa0a6518d5ebab..7009608804860ae241647ee6405e4f85c50cf1c8 100644
--- a/User_Feedback_Diagnostics/utils_calculate_metrics.py
+++ b/User_Feedback_Diagnostics/utils_calculate_metrics.py
@@ -24,12 +24,17 @@ def process_ratings_file(file_path):
     total_score={}
     num_score={}
     num_ratings = 0
-
+    current_prompt = ""  # default to empty string; protobuf string fields reject None
     for ratings_dict in ratings_list:
-        num_ratings+=1
-          
+        num_ratings += 1
+
+        # Track the most recent "Prompt" entry in the file
+        prompt = ratings_dict.get("Prompt")
+        if prompt:
+            current_prompt = prompt
+
         for key,value in ratings_dict.items():
-            if isinstance(value,str) and "filename" not in key.lower():
+            if isinstance(value,str) and "filename" not in key.lower() and "prompt" not in key.lower():
                     try:
                         parser.parse(value)
                     except:
@@ -58,4 +63,4 @@ def process_ratings_file(file_path):
     average_sentiment_score = round(total_sentiment_score / num_ratings, 2) if num_ratings > 0 else 0
     average_rating = round(total_rating / num_ratings , 2) if num_ratings > 0 else 0
         
-    return score_averages, average_rating, average_sentiment_score
\ No newline at end of file
+    return score_averages, average_rating, average_sentiment_score, current_prompt