diff --git a/multi-agentic-travel-advisor/chatbot/DOCKER_TAG b/multi-agentic-travel-advisor/chatbot/DOCKER_TAG
new file mode 100644
index 0000000000000000000000000000000000000000..ed87274f8f7a30e07630ef712a5aacfe8ebf7647
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/DOCKER_TAG
@@ -0,0 +1,2 @@
+cicd.ai4eu-dev.eu:7444/tutorials/multi-agent-new/chatbot-stream
+
diff --git a/multi-agentic-travel-advisor/chatbot/Dockerfile b/multi-agentic-travel-advisor/chatbot/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..0d9366122fa3798243b37758b6db670c4983a3f0
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/Dockerfile
@@ -0,0 +1,44 @@
+# Use a slim Python base image
+FROM python:3.10-slim
+
+ENV PYTHONUNBUFFERED=1
+
+RUN apt-get update -y && \
+    apt-get install -y --no-install-recommends \
+        build-essential \
+        gcc \
+        libffi-dev \
+        libssl-dev \
+        git \
+        curl \
+        ca-certificates \
+        tzdata \
+        protobuf-compiler && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set timezone
+ARG TIMEZONE=Europe/Berlin
+ENV TZ=$TIMEZONE
+RUN ln -snf /usr/share/zoneinfo/$TIMEZONE /etc/localtime && echo $TIMEZONE > /etc/timezone
+
+# Set working directory
+WORKDIR /chatbot
+
+# Copy requirements and install
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy project files
+COPY . .
+
+# Compile .proto file
+RUN python3 -m grpc_tools.protoc \
+    --proto_path=. \
+    --python_out=. \
+    --grpc_python_out=. \
+    chatbot.proto
+
+# Run the server
+CMD ["python3", "-u", "server.py"]
+
diff --git a/multi-agentic-travel-advisor/chatbot/app.py b/multi-agentic-travel-advisor/chatbot/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..718fe520fa2ea4a205f97e07c23441b62cc12835
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/app.py
@@ -0,0 +1,47 @@
+from flask import Flask, render_template, request, redirect, url_for, session
+import logging
+
+# Initialize Flask app
+app = Flask(__name__)
+app.config["SECRET_KEY"] = "chat"  # FIXME(review): hardcoded secret key; load from an env var in production
+
+# Setup logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# This will hold the user input parameters
+parameters = []
+
+
+# This function will return the parameters list
+def get_parameters():
+    return parameters
+
+
+@app.route("/", methods=["GET", "POST"])
+def chat():
+    if request.method == "POST":
+        user_query = request.form.get("user_query", "")
+        if user_query:
+            logger.info(f"User query added to parameters: {user_query}")
+            parameters.clear()
+            parameters.append(user_query)  # Add the new query to the parameters list
+
+            try:
+                app.chatbot_instance.receive_new_query(
+                    user_query
+                )  # Using the shared instance
+            except Exception as e:
+                logger.error("Error with chatbot_instance: %s", e)
+
+        return redirect(url_for("chat"))
+
+    chat_history = []
+    try:
+        with open("chat_log.txt", "r", encoding="utf-8") as log_file:
+            lines = log_file.readlines()
+            chat_history = [line.strip() for line in lines if line.strip()]
+    except FileNotFoundError:
+        chat_history = ["No conversation history yet."]
+
+    return render_template("chat.html", chat_history=chat_history)
diff --git a/multi-agentic-travel-advisor/chatbot/build_and_deploy.sh b/multi-agentic-travel-advisor/chatbot/build_and_deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..96ee64d174fe27f4bab58a2cafa007df83227005
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/build_and_deploy.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Variables
+IMAGE_NAME="chatbot-stream"
+REGISTRY_URL="cicd.ai4eu-dev.eu:7444"
+REPO_PATH="tutorials/multi-agent-new"
+TAG="latest"  
+
+# Step 1: Build the Docker image (abort on failure so a stale image is never tagged/pushed)
+echo "Building Docker image..."
+docker build -t $IMAGE_NAME . || { echo "Error: Docker build failed!"; exit 1; }
+
+# Step 2: Tag the Docker image
+echo "Tagging Docker image..."
+docker tag $IMAGE_NAME $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG
+
+# Step 3: Push the Docker image to the registry
+echo "Pushing Docker image to registry..."
+docker push $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG
+
+# Check if the image was pushed successfully
+if [ $? -eq 0 ]; then
+  echo "Docker image successfully pushed to $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG"
+else
+  echo "Error: Docker push failed!"
+  exit 1
+fi
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/chatbot/chatbot.proto b/multi-agentic-travel-advisor/chatbot/chatbot.proto
new file mode 100644
index 0000000000000000000000000000000000000000..d7ded7e2f8c1ac41b58230368b7acec9012bc763
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/chatbot.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+message Empty {
+
+}
+
+message UserQuery {
+  string text = 1;
+}
+
+message AgentResponse {
+  string text = 1;
+}
+
+service Chat {
+  rpc requestUserQueriesToPlanner(Empty) returns (stream UserQuery);  
+  rpc processAgentResponsesFromPlanner(stream AgentResponse) returns (Empty);  
+}
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/chatbot/chatbot_pb2.py b/multi-agentic-travel-advisor/chatbot/chatbot_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e888ff4d31a62b19c7d571ae67ef4a81911837d
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/chatbot_pb2.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: chatbot.proto
+"""Generated protocol buffer code."""
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
+    b'\n\rchatbot.proto"\x07\n\x05\x45mpty"\x19\n\tUserQuery\x12\x0c\n\x04text\x18\x01 \x01(\t"\x1d\n\rAgentResponse\x12\x0c\n\x04text\x18\x01 \x01(\t2y\n\x04\x43hat\x12\x33\n\x1brequestUserQueriesToPlanner\x12\x06.Empty\x1a\n.UserQuery0\x01\x12<\n processAgentResponsesFromPlanner\x12\x0e.AgentResponse\x1a\x06.Empty(\x01\x62\x06proto3'
+)
+
+
+_EMPTY = DESCRIPTOR.message_types_by_name["Empty"]
+_USERQUERY = DESCRIPTOR.message_types_by_name["UserQuery"]
+_AGENTRESPONSE = DESCRIPTOR.message_types_by_name["AgentResponse"]
+Empty = _reflection.GeneratedProtocolMessageType(
+    "Empty",
+    (_message.Message,),
+    {
+        "DESCRIPTOR": _EMPTY,
+        "__module__": "chatbot_pb2",
+        # @@protoc_insertion_point(class_scope:Empty)
+    },
+)
+_sym_db.RegisterMessage(Empty)
+
+UserQuery = _reflection.GeneratedProtocolMessageType(
+    "UserQuery",
+    (_message.Message,),
+    {
+        "DESCRIPTOR": _USERQUERY,
+        "__module__": "chatbot_pb2",
+        # @@protoc_insertion_point(class_scope:UserQuery)
+    },
+)
+_sym_db.RegisterMessage(UserQuery)
+
+AgentResponse = _reflection.GeneratedProtocolMessageType(
+    "AgentResponse",
+    (_message.Message,),
+    {
+        "DESCRIPTOR": _AGENTRESPONSE,
+        "__module__": "chatbot_pb2",
+        # @@protoc_insertion_point(class_scope:AgentResponse)
+    },
+)
+_sym_db.RegisterMessage(AgentResponse)
+
+_CHAT = DESCRIPTOR.services_by_name["Chat"]
+if _descriptor._USE_C_DESCRIPTORS == False:
+    DESCRIPTOR._options = None
+    _EMPTY._serialized_start = 17
+    _EMPTY._serialized_end = 24
+    _USERQUERY._serialized_start = 26
+    _USERQUERY._serialized_end = 51
+    _AGENTRESPONSE._serialized_start = 53
+    _AGENTRESPONSE._serialized_end = 82
+    _CHAT._serialized_start = 84
+    _CHAT._serialized_end = 205
+# @@protoc_insertion_point(module_scope)
diff --git a/multi-agentic-travel-advisor/chatbot/chatbot_pb2_grpc.py b/multi-agentic-travel-advisor/chatbot/chatbot_pb2_grpc.py
new file mode 100644
index 0000000000000000000000000000000000000000..afab2c32f58ec21cd65f39ce35e17646a555ea87
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/chatbot_pb2_grpc.py
@@ -0,0 +1,123 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+
+import grpc
+
+import chatbot_pb2 as chatbot__pb2
+
+
+class ChatStub(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def __init__(self, channel):
+        """Constructor.
+
+        Args:
+            channel: A grpc.Channel.
+        """
+        self.requestUserQueriesToPlanner = channel.unary_stream(
+            "/Chat/requestUserQueriesToPlanner",
+            request_serializer=chatbot__pb2.Empty.SerializeToString,
+            response_deserializer=chatbot__pb2.UserQuery.FromString,
+        )
+        self.processAgentResponsesFromPlanner = channel.stream_unary(
+            "/Chat/processAgentResponsesFromPlanner",
+            request_serializer=chatbot__pb2.AgentResponse.SerializeToString,
+            response_deserializer=chatbot__pb2.Empty.FromString,
+        )
+
+
+class ChatServicer(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def requestUserQueriesToPlanner(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details("Method not implemented!")
+        raise NotImplementedError("Method not implemented!")
+
+    def processAgentResponsesFromPlanner(self, request_iterator, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details("Method not implemented!")
+        raise NotImplementedError("Method not implemented!")
+
+
+def add_ChatServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+        "requestUserQueriesToPlanner": grpc.unary_stream_rpc_method_handler(
+            servicer.requestUserQueriesToPlanner,
+            request_deserializer=chatbot__pb2.Empty.FromString,
+            response_serializer=chatbot__pb2.UserQuery.SerializeToString,
+        ),
+        "processAgentResponsesFromPlanner": grpc.stream_unary_rpc_method_handler(
+            servicer.processAgentResponsesFromPlanner,
+            request_deserializer=chatbot__pb2.AgentResponse.FromString,
+            response_serializer=chatbot__pb2.Empty.SerializeToString,
+        ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler("Chat", rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
+
+
+# This class is part of an EXPERIMENTAL API.
+class Chat(object):
+    """Missing associated documentation comment in .proto file."""
+
+    @staticmethod
+    def requestUserQueriesToPlanner(
+        request,
+        target,
+        options=(),
+        channel_credentials=None,
+        call_credentials=None,
+        insecure=False,
+        compression=None,
+        wait_for_ready=None,
+        timeout=None,
+        metadata=None,
+    ):
+        return grpc.experimental.unary_stream(
+            request,
+            target,
+            "/Chat/requestUserQueriesToPlanner",
+            chatbot__pb2.Empty.SerializeToString,
+            chatbot__pb2.UserQuery.FromString,
+            options,
+            channel_credentials,
+            insecure,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+        )
+
+    @staticmethod
+    def processAgentResponsesFromPlanner(
+        request_iterator,
+        target,
+        options=(),
+        channel_credentials=None,
+        call_credentials=None,
+        insecure=False,
+        compression=None,
+        wait_for_ready=None,
+        timeout=None,
+        metadata=None,
+    ):
+        return grpc.experimental.stream_unary(
+            request_iterator,
+            target,
+            "/Chat/processAgentResponsesFromPlanner",
+            chatbot__pb2.AgentResponse.SerializeToString,
+            chatbot__pb2.Empty.FromString,
+            options,
+            channel_credentials,
+            insecure,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+        )
diff --git a/multi-agentic-travel-advisor/chatbot/client.py b/multi-agentic-travel-advisor/chatbot/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c0ea20f6d6fb3ca8fedfd2c72e468b96ced1462
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/client.py
@@ -0,0 +1,20 @@
+import grpc
+import chatbot_pb2
+import chatbot_pb2_grpc
+
+
+def run():
+    with grpc.insecure_channel("localhost:8061") as channel:
+        stub = chatbot_pb2_grpc.ChatStub(channel)
+        print("🌐 Connected to gRPC server")
+
+        try:
+            responses = stub.requestUserQueriesToPlanner(chatbot_pb2.Empty())
+            for response in responses:
+                print(f"✅ Received user query: {response.text}")
+        except grpc.RpcError as e:
+            print(f"gRPC Error: {e.code()} - {e.details()}")
+
+
+if __name__ == "__main__":
+    run()
diff --git a/multi-agentic-travel-advisor/chatbot/requirements.txt b/multi-agentic-travel-advisor/chatbot/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..52b2ced30fe80c1d8a0992909ceb6ff35905153b
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/requirements.txt
@@ -0,0 +1,5 @@
+grpcio==1.38.0
+grpcio-tools==1.38.0
+grpc-interceptor
+protobuf==3.16.0
+Flask
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/chatbot/server.py b/multi-agentic-travel-advisor/chatbot/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..25c5c5c180b9a31fc1a1d8a328fc3487f1a2ecee
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/server.py
@@ -0,0 +1,121 @@
+import grpc
+from concurrent import futures
+import logging
+import threading
+import queue
+
+# Import generated classes
+import chatbot_pb2
+import chatbot_pb2_grpc
+import re
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+port = 8061
+
+
+class Chatbot(chatbot_pb2_grpc.ChatServicer):
+    def __init__(self):
+        super().__init__()
+        self.allow_send = False
+        self.query_text = None
+        self.log_queue = queue.Queue()
+        self.lock = threading.Lock()
+        self.condition = threading.Condition(self.lock)
+        self.log_thread = threading.Thread(target=self.log_writer)
+        self.log_thread.daemon = True
+        self.log_thread.start()
+
+    def log_writer(self):
+        """Background thread that writes logs asynchronously."""
+        while True:
+            query = self.log_queue.get()
+            try:
+                with open("chat_log.txt", "a", encoding="utf-8") as log_file:
+                    log_file.write(f"User: {query}\n")
+                    print(f"User query written to chat_log.txt: {query}")
+            except Exception as e:
+                print(f"Error writing to log file: {e}")
+            self.log_queue.task_done()
+
+    def receive_new_query(self, query_text):
+        """Receives new user query and notifies the streaming generator."""
+        print(f"receive_new_query called with: {query_text}")
+        with self.condition:
+            self.query_text = query_text
+            self.allow_send = True
+            self.condition.notify()
+
+    def requestUserQueriesToPlanner(self, request, context):
+        print("âš¡ requestUserQueriesToPlanner started (streaming begins)")
+
+        while context.is_active():
+            with self.condition:
+                while not (self.allow_send and self.query_text):
+                    print("Waiting for new query...")
+                    self.condition.wait(timeout=5)
+
+                    if not context.is_active():
+                        print("⚠️ Client disconnected.")
+                        return
+
+                current_query = self.query_text
+                self.allow_send = False
+                self.query_text = None
+
+            print(f"🔄 Yielding query: {current_query}")
+            self.log_queue.put(current_query)
+            yield chatbot_pb2.UserQuery(text=current_query)
+
+        print("❗ Exiting streaming loop - client context is no longer active.")
+
+    def processAgentResponsesFromPlanner(self, request_iterator, context):
+        print("Connected to processAgentResponsesFromPlanner....")
+        for response in request_iterator:
+            print(f"Received bot response: {response.text}")
+            # Format the bot response (you can use HTML tags here)
+            formatted_response = self.format_bot_response(response.text)
+
+            with open("chat_log.txt", "a", encoding="utf-8") as log_file:
+                log_file.write(f"Bot: {formatted_response}\n")
+        return chatbot_pb2.Empty()
+
+    def format_bot_response(self, response_text):
+        formatted_response = re.sub(r"\*\*(.*?)\*\*", r"<b>\1</b>", response_text)
+        formatted_response = formatted_response.replace("\n", "<br>")
+        formatted_response = re.sub(
+            r"(https?://[^\s]+)",
+            r'<a href="\1" target="_blank">\1</a>',
+            formatted_response,
+        )
+        formatted_response = re.sub(r"\d+\.\s", "", formatted_response)
+        formatted_response = formatted_response.replace("- ", "")
+
+        return formatted_response
+
+
+chatbot_instance = Chatbot()  # Global and shared
+
+
+def serve():
+    # gRPC Server setup
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    chatbot_pb2_grpc.add_ChatServicer_to_server(chatbot_instance, server)
+    server.add_insecure_port(f"[::]:{port}")
+    logger.info(f"Starting streaming server on port {port}")
+    server.start()
+    threading.Thread(target=app_run).start()
+
+    server.wait_for_termination()
+
+
+def app_run():
+    from app import app
+
+    app.chatbot_instance = chatbot_instance
+    app.run(host="0.0.0.0", port=8062, debug=False)
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    serve()
diff --git a/multi-agentic-travel-advisor/chatbot/templates/chat.html b/multi-agentic-travel-advisor/chatbot/templates/chat.html
new file mode 100644
index 0000000000000000000000000000000000000000..391b40198dc7103b4a63f59c2fb1a60240582623
--- /dev/null
+++ b/multi-agentic-travel-advisor/chatbot/templates/chat.html
@@ -0,0 +1,181 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Travel Advisor</title>
+    
+    <!-- Leaflet.js CSS -->
+    <link rel="stylesheet" href="https://unpkg.com/leaflet@1.7.1/dist/leaflet.css" />
+    
+    <!-- Font Awesome for icons -->
+    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.0/css/all.min.css">
+    
+    <style>
+        body, html {
+            height: 100%;
+            margin: 0;
+            font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
+        }
+    
+        /* Create the map container to be the background */
+        #map {
+            position: absolute;
+            top: 0;
+            left: 0;
+            right: 0;
+            bottom: 0;
+            z-index: -1;  /* Ensure the map is behind everything else */
+        }
+    
+        .overlay {
+            background: rgba(255, 255, 255, 0.85);
+            min-height: 100vh;
+            padding: 40px 20px;
+            display: flex;
+            flex-direction: column;
+            align-items: center;
+            z-index: 1;
+        }
+    
+        h1 {
+            font-size: 2.5em;
+            margin-bottom: 10px;
+            color: #2c3e50;
+        }
+    
+        form {
+            display: flex;
+            background: #fff;
+            padding: 15px 20px;
+            border-radius: 10px;
+            box-shadow: 0 2px 10px rgba(0, 0, 0, 0.15);
+            width: 100%;
+            max-width: 600px;
+            margin-bottom: 30px;
+            gap: 10px;
+        }
+    
+        input[type="text"] {
+            flex: 1;
+            padding: 12px;
+            font-size: 16px;
+            border: 1px solid #ccc;
+            border-radius: 6px;
+        }
+    
+        button {
+            background-color: #3498db;
+            color: white;
+            border: none;
+            border-radius: 6px;
+            padding: 12px 20px;
+            font-size: 16px;
+            cursor: pointer;
+            transition: background-color 0.3s ease;
+        }
+    
+        button:hover {
+            background-color: #2980b9;
+        }
+    
+        .chat-box {
+            background: white;
+            overflow-y: auto;
+            padding: 15px;
+            border-radius: 8px;
+            width: 100%;
+            max-width: 600px;
+            box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
+            font-size: 14px; /* Reduced font size for better readability */
+            word-wrap: break-word; /* Ensures words break appropriately */
+        }
+
+        .chat-line {
+            padding: 8px 12px;  /* Reduced padding */
+            margin: 8px 0;      /* Added margin for separation between lines */
+            border-radius: 8px;
+            display: flex;
+            flex-direction: column; /* Ensure messages are stacked vertically */
+            align-items: flex-start; /* Align text to the left */
+            gap: 8px;           /* Reduced gap between text and icon */
+            white-space: pre-wrap; /* Ensures line breaks are respected */
+            max-width: 100%;    /* Ensures lines don’t break out of the container */
+            word-wrap: break-word; /* Allow words to wrap within the container */
+            overflow-wrap: break-word; /* Forces long words to break and wrap */
+            word-break: break-all; /* Breaks long words at any point */
+        }
+
+        .user {
+            text-align: right;
+            background-color: #d0f0fd;
+            align-self: flex-end;
+        }
+
+        .bot {
+            text-align: left;
+            background-color: #fef1e6;
+            align-self: flex-start;
+        }
+
+        .chat-line i {
+            margin-right: 8px; /* Reduced icon margin */
+            color: #555;
+        }
+
+        /* Smaller font size for mobile screens */
+        @media (max-width: 600px) {
+            .chat-box {
+                font-size: 12px; /* Even smaller text on smaller screens */
+            }
+        }
+
+    </style>
+    
+</head>
+<body>
+    <div id="map"></div>  <!-- Map container for interactive background -->
+
+    <div class="overlay">
+        <h1><i class="fas fa-plane-departure"></i> Travel Advisor</h1>
+
+        <form method="POST">
+            <input type="text" id="user_query" name="user_query" placeholder="Ask right away!..." required>
+            <button type="submit"><i class="fas fa-paper-plane"></i></button>
+        </form>
+
+        <div class="chat-box">
+            <h2><i class="fas fa-comments"></i> Chat History</h2>
+            {% for line in chat_history %}
+                <p class="chat-line {{ 'user' if 'User:' in line else 'bot' }}">
+                    <i class="fas {{ 'fa-user' if 'User:' in line else 'fa-robot' }}"></i>
+                    <!-- Using the |safe filter to render HTML content -->
+                    {{ line|safe }}
+                </p>
+            {% endfor %}
+        </div>
+        
+    </div>
+
+    <script src="https://unpkg.com/leaflet@1.7.1/dist/leaflet.js"></script>
+    <script>
+        var map = L.map('map').setView([51.505, -0.09], 2);
+
+        L.tileLayer('https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', {
+            attribution: '&copy; <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors'
+        }).addTo(map);
+
+        L.marker([51.505, -0.09]).addTo(map)
+            .bindPopup("<b>Welcome!</b><br>Ask me about your next trip.")
+            .openPopup();
+    </script>
+
+    <script>
+        window.onload = function() {
+            const chatBox = document.querySelector('.chat-box');
+            chatBox.scrollTop = chatBox.scrollHeight;
+        };
+    </script>
+    
+</body>
+</html>
diff --git a/multi-agentic-travel-advisor/food_rec_agent/DOCKER_TAG b/multi-agentic-travel-advisor/food_rec_agent/DOCKER_TAG
new file mode 100644
index 0000000000000000000000000000000000000000..1fe6d96a185b0d83b5e20f317485b4308259712d
--- /dev/null
+++ b/multi-agentic-travel-advisor/food_rec_agent/DOCKER_TAG
@@ -0,0 +1 @@
+cicd.ai4eu-dev.eu:7444/tutorials/multi-agent-new/food-rec-agent-s
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/food_rec_agent/Dockerfile b/multi-agentic-travel-advisor/food_rec_agent/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..c1d8bdf80cdc49229de923de3e93a93681e04d6f
--- /dev/null
+++ b/multi-agentic-travel-advisor/food_rec_agent/Dockerfile
@@ -0,0 +1,44 @@
+# Use a slim Python base image
+FROM python:3.10-slim
+
+ENV PYTHONUNBUFFERED=1
+
+RUN apt-get update -y && \
+    apt-get install -y --no-install-recommends \
+        build-essential \
+        gcc \
+        libffi-dev \
+        libssl-dev \
+        git \
+        curl \
+        ca-certificates \
+        tzdata \
+        protobuf-compiler && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set timezone
+ARG TIMEZONE=Europe/Berlin
+ENV TZ=$TIMEZONE
+RUN ln -snf /usr/share/zoneinfo/$TIMEZONE /etc/localtime && echo $TIMEZONE > /etc/timezone
+
+# Set working directory
+WORKDIR /food-agent
+
+# Copy requirements and install
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy project files
+COPY . .
+
+# Compile .proto file
+RUN python3 -m grpc_tools.protoc \
+    --proto_path=. \
+    --python_out=. \
+    --grpc_python_out=. \
+    agent.proto
+
+# Run the server
+CMD ["python3", "-u", "server.py"]
+
diff --git a/multi-agentic-travel-advisor/food_rec_agent/agent.proto b/multi-agentic-travel-advisor/food_rec_agent/agent.proto
new file mode 100644
index 0000000000000000000000000000000000000000..7c7cd679f012e7fd29a7bc84d1e08300b2f2d3b8
--- /dev/null
+++ b/multi-agentic-travel-advisor/food_rec_agent/agent.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+message Empty {
+
+}
+
+message UserQuery {
+    string text = 1;
+}
+
+message AgentRequest {
+    string text = 1;
+    AgentType agent_type = 2;
+}
+
+enum AgentType {
+    UNKNOWN = 0;
+    WEATHER = 1;
+    PLACES = 2;
+    FOOD = 3;
+    MAP = 4;
+}
+
+message AgentResponse {
+    string text = 1;
+}
+
+service Agent {
+    rpc evaluateAgentRequestsFromPlanner(stream AgentRequest) returns (stream AgentResponse);
+}
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/food_rec_agent/build_and_deploy.sh b/multi-agentic-travel-advisor/food_rec_agent/build_and_deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d0cc4a4a583c46d19f8d93750e6cdb586a394419
--- /dev/null
+++ b/multi-agentic-travel-advisor/food_rec_agent/build_and_deploy.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Variables
+IMAGE_NAME="food-rec-agent-s"
+REGISTRY_URL="cicd.ai4eu-dev.eu:7444"
+REPO_PATH="tutorials/multi-agent-new"
+TAG="latest"  
+
+# Step 1: Build the Docker image (abort on failure so a stale image is never tagged/pushed)
+echo "Building Docker image..."
+docker build -t $IMAGE_NAME . || { echo "Error: Docker build failed!"; exit 1; }
+
+# Step 2: Tag the Docker image
+echo "Tagging Docker image..."
+docker tag $IMAGE_NAME $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG
+
+# Step 3: Push the Docker image to the registry
+echo "Pushing Docker image to registry..."
+docker push $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG
+
+# Check if the image was pushed successfully
+if [ $? -eq 0 ]; then
+  echo "Docker image successfully pushed to $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG"
+else
+  echo "Error: Docker push failed!"
+  exit 1
+fi
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/food_rec_agent/client.py b/multi-agentic-travel-advisor/food_rec_agent/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..2037ac24701fcd2e375d9ee289605df1b544a432
--- /dev/null
+++ b/multi-agentic-travel-advisor/food_rec_agent/client.py
@@ -0,0 +1,17 @@
+import grpc
+import agent_pb2
+import agent_pb2_grpc
+
+
+def run():
+    channel = grpc.insecure_channel("localhost:8061")
+    stub = agent_pb2_grpc.AgentStub(channel)
+    user_query = input("Please enter your query (e.g., 'Is it sunny in Rome?'): ")
+    request = agent_pb2.AgentRequest(text=user_query, agent_type=agent_pb2.FOOD)
+    for response in stub.evaluateAgentRequestsFromPlanner(iter([request])):
+        print("Response from agent:", response.text)
+    channel.close()
+
+
+if __name__ == "__main__":
+    run()
diff --git a/multi-agentic-travel-advisor/food_rec_agent/requirements.txt b/multi-agentic-travel-advisor/food_rec_agent/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..014cced4908f330d725fb4891d57b21fc57b4c1b
--- /dev/null
+++ b/multi-agentic-travel-advisor/food_rec_agent/requirements.txt
@@ -0,0 +1,6 @@
+grpcio==1.38.0
+grpcio-tools==1.38.0
+grpc-interceptor
+protobuf==3.16.0
+openai
+requests
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/food_rec_agent/server.py b/multi-agentic-travel-advisor/food_rec_agent/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..717a6b3a8f24a16ba0f036f3a0852cdcd83d78d9
--- /dev/null
+++ b/multi-agentic-travel-advisor/food_rec_agent/server.py
@@ -0,0 +1,94 @@
+import grpc
+import json
+from concurrent import futures
+
+import agent_pb2
+import agent_pb2_grpc
+
+from tool_calling.tools import get_food_recommendations, tools
+from tool_calling.llm_client import client, deployment_name
+
+
+def run_conversation(prompt: str) -> str:
+    messages = [{"role": "user", "content": prompt}]
+
+    response = client.chat.completions.create(
+        model=deployment_name, messages=messages, tools=tools, tool_choice="required"
+    )
+
+    response_message = response.choices[0].message
+    messages.append(response_message)
+
+    print("\nModel's Response:")
+    print(response_message)
+
+    if response_message.tool_calls:
+        for tool_call in response_message.tool_calls:
+            function_name = tool_call.function.name
+            function_args = json.loads(tool_call.function.arguments)
+
+            print("------------------TOOL CALLING---------------------------\n")
+            print(f"\nFunction Call: {function_name}")
+            print(f"Arguments: {function_args}")
+            print("------------------TOOL CALLING---------------------------\n")
+
+            if function_name == "get_food_recommendations":
+                function_response = get_food_recommendations(
+                    place=function_args.get("place")
+                )
+            else:
+                function_response = json.dumps({"error": "Unknown function"})
+
+            messages.append(
+                {
+                    "tool_call_id": tool_call.id,
+                    "role": "tool",
+                    "name": function_name,
+                    "content": function_response,
+                }
+            )
+
+    else:
+        print("No tool calls were made by the model.")
+
+    # Second API call: Get final response
+    final_response = client.chat.completions.create(
+        model=deployment_name,
+        messages=messages,
+    )
+
+    return final_response.choices[0].message.content
+
+
+class FoodRecAgent(agent_pb2_grpc.AgentServicer):
+    def __init__(self):
+        self.latest_food_info = None
+
+    def evaluateAgentRequestsFromPlanner(self, request_iterator, context):
+        for request in request_iterator:
+            if request.agent_type == agent_pb2.FOOD:
+                print(f"\nReceived query: {request.text}")
+                food_info = run_conversation(request.text)
+
+                if food_info is None:
+                    food_info = "No food rec. info available."
+
+                self.latest_food_info = food_info
+                print(f"Food Rec. info set to: {self.latest_food_info}")
+
+                response = agent_pb2.AgentResponse(text=food_info)
+                yield response
+
+
+def serve():
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    agent_pb2_grpc.add_AgentServicer_to_server(FoodRecAgent(), server)
+    port = 8061
+    server.add_insecure_port(f"[::]:{port}")
+    print(f"Food Rec. agent server started on port {port}...")
+    server.start()
+    server.wait_for_termination()
+
+
+if __name__ == "__main__":
+    serve()
diff --git a/multi-agentic-travel-advisor/food_rec_agent/tool_calling/llm_client.py b/multi-agentic-travel-advisor/food_rec_agent/tool_calling/llm_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5342b16264873b5880dba4cb9e13711f7e6a2e3
--- /dev/null
+++ b/multi-agentic-travel-advisor/food_rec_agent/tool_calling/llm_client.py
@@ -0,0 +1,28 @@
+from openai import OpenAI
+import os
+from base64 import b64encode
+
+platform = "OpenAI"
+# "Llama-3.3-70B-Instruct"
+# "gpt-4o-mini"
+deployment_name = "gpt-4o-mini"
+
+
+if platform == "GenAI":
+    # Initialize OpenAI client
+    genai_username = os.getenv("IAIS_GENAI_USERNAME")
+    genai_password = os.getenv("IAIS_GENAI_PASSWORD")
+    token_string = f"{genai_username}:{genai_password}"
+    token_bytes = b64encode(token_string.encode())
+
+    client = OpenAI(
+        api_key="xxxx",
+        default_headers={"Authorization": f"Basic {token_bytes.decode()}"},
+        base_url="https://genai.iais.fraunhofer.de/api/v2",
+    )
+
+elif platform == "OpenAI":
+    # The OpenAI() client reads OPENAI_API_KEY from the environment itself.
+    # Re-assigning os.environ from os.getenv() was a no-op when the variable
+    # existed and raised TypeError (None is not a str) when it did not —
+    # fail fast with a clear message instead.
+    if not os.getenv("OPENAI_API_KEY"):
+        raise RuntimeError("OPENAI_API_KEY environment variable is not set.")
+
+    client = OpenAI()
diff --git a/multi-agentic-travel-advisor/food_rec_agent/tool_calling/tools.py b/multi-agentic-travel-advisor/food_rec_agent/tool_calling/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..f176adba76598e2cba88dfa21240a03d32b922ce
--- /dev/null
+++ b/multi-agentic-travel-advisor/food_rec_agent/tool_calling/tools.py
@@ -0,0 +1,108 @@
+import requests
+import json
+
+
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_food_recommendations",
+            "description": "Fetches food recommendations for a given place using the OpenFoodFacts API.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "place": {
+                        "type": "string",
+                        "description": "The place to search for food recommendations.",
+                    },
+                    "limit": {
+                        "type": "integer",
+                        "description": "The maximum number of food recommendations to return.",
+                        "default": 20,
+                    },
+                },
+                "required": ["place"],
+                "additionalProperties": False,
+            },
+        },
+    }
+]
+
+from typing import List
+
+
+def get_food_recommendations(place: str, limit: int = 20) -> str:
+    """
+    Fetches food product recommendations from OpenFoodFacts based on a given place.
+
+    Args:
+        place (str): The place or keyword to search for food-related items.
+        limit (int): Maximum number of recommendations to return.
+
+    Returns:
+        str: A JSON-formatted string containing the status and either data or error message.
+    """
+
+    api_url = "https://world.openfoodfacts.org/cgi/search.pl"
+    params = {"search_terms": place, "action": "process", "json": 1, "page_size": 100}
+
+    try:
+        response = requests.get(api_url, params=params, timeout=10)
+        response.raise_for_status()
+    except requests.RequestException as e:
+        return json.dumps(
+            {"status": "error", "message": f"Error fetching data: {str(e)}"}, indent=4
+        )
+
+    data = response.json()
+    products = data.get("products", [])
+
+    valid_categories = {
+        "foods",
+        "beverages",
+        "snacks",
+        "dairy",
+        "cheese",
+        "meat",
+        "sweets",
+        "drinks",
+        "biscuits",
+        "cereals",
+        "bread",
+        "pasta",
+        "rice",
+        "spreads",
+        "chocolates",
+        "cookies",
+        "candies",
+        "sauces",
+        "soups",
+        "juices",
+        "alcoholic beverages",
+    }
+
+    recommendations: List[str] = []
+
+    for product in products:
+        name = product.get("product_name", "").strip()
+        categories = product.get("categories_tags", [])
+
+        if name and any(
+            cat.replace("en:", "") in valid_categories for cat in categories
+        ):
+            recommendations.append(name)
+        if len(recommendations) >= limit:
+            break
+
+    if recommendations:
+        return json.dumps({"status": "success", "data": recommendations}, indent=4)
+    else:
+        return json.dumps(
+            {
+                "status": "error",
+                "message": f"No valid food recommendations found for '{place}'.",
+            },
+            indent=4,
+        )
diff --git a/multi-agentic-travel-advisor/places_agent/DOCKER_TAG b/multi-agentic-travel-advisor/places_agent/DOCKER_TAG
new file mode 100644
index 0000000000000000000000000000000000000000..e0308a5d7280f9b86827e4ec89ccc7b39a539813
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/DOCKER_TAG
@@ -0,0 +1 @@
+cicd.ai4eu-dev.eu:7444/tutorials/multi-agent-new/places-agent-s
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/places_agent/Dockerfile b/multi-agentic-travel-advisor/places_agent/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..be59e34c1b827a1a2af1a96f3f188228f4876401
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/Dockerfile
@@ -0,0 +1,44 @@
+# Use a slim Python base image
+FROM python:3.10-slim
+
+ENV PYTHONUNBUFFERED=1
+
+RUN apt-get update -y && \
+    apt-get install -y --no-install-recommends \
+        build-essential \
+        gcc \
+        libffi-dev \
+        libssl-dev \
+        git \
+        curl \
+        ca-certificates \
+        tzdata \
+        protobuf-compiler && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set timezone
+ARG TIMEZONE=Europe/Berlin
+ENV TZ=$TIMEZONE
+RUN ln -snf /usr/share/zoneinfo/$TIMEZONE /etc/localtime && echo $TIMEZONE > /etc/timezone
+
+# Set working directory
+WORKDIR /places-agent
+
+# Copy requirements and install
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy project files
+COPY . .
+
+# Compile .proto file
+RUN python3 -m grpc_tools.protoc \
+    --proto_path=. \
+    --python_out=. \
+    --grpc_python_out=. \
+    agent.proto
+
+# Run the server
+CMD ["python3", "-u", "server.py"]
+
diff --git a/multi-agentic-travel-advisor/places_agent/agent.proto b/multi-agentic-travel-advisor/places_agent/agent.proto
new file mode 100644
index 0000000000000000000000000000000000000000..92378c237d527ef36d8305b5ae17ff867c3193b1
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/agent.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+message Empty {
+
+}
+
+message UserQuery {
+    string text = 1;
+}
+
+message AgentRequest {
+    string text = 1;
+    AgentType agent_type = 2;
+}
+
+enum AgentType {
+    UNKNOWN = 0;
+    WEATHER = 1;
+    PLACES = 2;
+    FOOD = 3;
+    MAP = 4;
+}
+
+message AgentResponse {
+    string text = 1;
+}
+
+service Agent {
+    rpc evaluateAgentRequestsFromPlanner(stream AgentRequest) returns (stream AgentResponse);
+}
diff --git a/multi-agentic-travel-advisor/places_agent/agent_pb2.py b/multi-agentic-travel-advisor/places_agent/agent_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..34ce94df2b4fa9f4eca95d15072474b527e41683
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/agent_pb2.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: agent.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+
+_runtime_version.ValidateProtobufRuntimeVersion(
+    _runtime_version.Domain.PUBLIC, 5, 29, 0, "", "agent.proto"
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
+    b'\n\x0b\x61gent.proto"\x19\n\tUserQuery\x12\x0c\n\x04text\x18\x01 \x01(\t"<\n\x0c\x41gentRequest\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\x1e\n\nagent_type\x18\x02 \x01(\x0e\x32\n.AgentType"&\n\rAgentResponse\x12\x15\n\rresponse_text\x18\x01 \x01(\t*E\n\tAgentType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07WEATHER\x10\x01\x12\x07\n\x03MAP\x10\x02\x12\x08\n\x04NEWS\x10\x03\x12\x0b\n\x07\x46INANCE\x10\x04\x32:\n\x05\x41gent\x12\x31\n\x10GetAgentResponse\x12\r.AgentRequest\x1a\x0e.AgentResponseb\x06proto3'
+)
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "agent_pb2", _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+    DESCRIPTOR._loaded_options = None
+    _globals["_AGENTTYPE"]._serialized_start = 144
+    _globals["_AGENTTYPE"]._serialized_end = 213
+    _globals["_USERQUERY"]._serialized_start = 15
+    _globals["_USERQUERY"]._serialized_end = 40
+    _globals["_AGENTREQUEST"]._serialized_start = 42
+    _globals["_AGENTREQUEST"]._serialized_end = 102
+    _globals["_AGENTRESPONSE"]._serialized_start = 104
+    _globals["_AGENTRESPONSE"]._serialized_end = 142
+    _globals["_AGENT"]._serialized_start = 215
+    _globals["_AGENT"]._serialized_end = 273
+# @@protoc_insertion_point(module_scope)
diff --git a/multi-agentic-travel-advisor/places_agent/agent_pb2_grpc.py b/multi-agentic-travel-advisor/places_agent/agent_pb2_grpc.py
new file mode 100644
index 0000000000000000000000000000000000000000..8034da638d15114d1478517a181c361e062076b3
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/agent_pb2_grpc.py
@@ -0,0 +1,104 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+
+import grpc
+import warnings
+
+import agent_pb2 as agent__pb2
+
+GRPC_GENERATED_VERSION = "1.70.0"
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+    from grpc._utilities import first_version_is_lower
+
+    _version_not_supported = first_version_is_lower(
+        GRPC_VERSION, GRPC_GENERATED_VERSION
+    )
+except ImportError:
+    _version_not_supported = True
+
+if _version_not_supported:
+    raise RuntimeError(
+        f"The grpc package installed is at version {GRPC_VERSION},"
+        + f" but the generated code in agent_pb2_grpc.py depends on"
+        + f" grpcio>={GRPC_GENERATED_VERSION}."
+        + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
+        + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
+    )
+
+
+class AgentStub(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def __init__(self, channel):
+        """Constructor.
+
+        Args:
+            channel: A grpc.Channel.
+        """
+        self.GetAgentResponse = channel.unary_unary(
+            "/Agent/GetAgentResponse",
+            request_serializer=agent__pb2.AgentRequest.SerializeToString,
+            response_deserializer=agent__pb2.AgentResponse.FromString,
+            _registered_method=True,
+        )
+
+
+class AgentServicer(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def GetAgentResponse(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details("Method not implemented!")
+        raise NotImplementedError("Method not implemented!")
+
+
+def add_AgentServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+        "GetAgentResponse": grpc.unary_unary_rpc_method_handler(
+            servicer.GetAgentResponse,
+            request_deserializer=agent__pb2.AgentRequest.FromString,
+            response_serializer=agent__pb2.AgentResponse.SerializeToString,
+        ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler("Agent", rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
+    server.add_registered_method_handlers("Agent", rpc_method_handlers)
+
+
+# This class is part of an EXPERIMENTAL API.
+class Agent(object):
+    """Missing associated documentation comment in .proto file."""
+
+    @staticmethod
+    def GetAgentResponse(
+        request,
+        target,
+        options=(),
+        channel_credentials=None,
+        call_credentials=None,
+        insecure=False,
+        compression=None,
+        wait_for_ready=None,
+        timeout=None,
+        metadata=None,
+    ):
+        return grpc.experimental.unary_unary(
+            request,
+            target,
+            "/Agent/GetAgentResponse",
+            agent__pb2.AgentRequest.SerializeToString,
+            agent__pb2.AgentResponse.FromString,
+            options,
+            channel_credentials,
+            insecure,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+            _registered_method=True,
+        )
diff --git a/multi-agentic-travel-advisor/places_agent/build_and_deploy.sh b/multi-agentic-travel-advisor/places_agent/build_and_deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..bb3b9e2099e5232694c845fc12ade71715c90fd2
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/build_and_deploy.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Variables
+IMAGE_NAME="places-agent-s"
+REGISTRY_URL="cicd.ai4eu-dev.eu:7444"
+REPO_PATH="tutorials/multi-agent-new"
+TAG="latest"  
+
+# Step 1: Build the Docker image
+echo "Building Docker image..."
+docker build -t $IMAGE_NAME .
+
+# Step 2: Tag the Docker image
+echo "Tagging Docker image..."
+docker tag $IMAGE_NAME $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG
+
+# Step 3: Push the Docker image to the registry
+echo "Pushing Docker image to registry..."
+docker push $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG
+
+# Check if the image was pushed successfully
+if [ $? -eq 0 ]; then
+  echo "Docker image successfully pushed to $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG"
+else
+  echo "Error: Docker push failed!"
+  exit 1
+fi
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/places_agent/client.py b/multi-agentic-travel-advisor/places_agent/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..2037ac24701fcd2e375d9ee289605df1b544a432
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/client.py
@@ -0,0 +1,17 @@
+import grpc
+import agent_pb2
+import agent_pb2_grpc
+
+
+def run():
+    channel = grpc.insecure_channel("localhost:8061")
+    stub = agent_pb2_grpc.AgentStub(channel)
+    user_query = input("Please enter your query (e.g., 'Is it sunny in Rome?'): ")
+    # agent.proto defines a bidirectional streaming RPC
+    # (evaluateAgentRequestsFromPlanner(stream AgentRequest)); there is no
+    # unary ProcessUserQuery on this service.
+    request = agent_pb2.AgentRequest(text=user_query, agent_type=agent_pb2.PLACES)
+    for response in stub.evaluateAgentRequestsFromPlanner(iter([request])):
+        print("Response from agent:", response.text)
+    channel.close()
+
+
+if __name__ == "__main__":
+    run()
diff --git a/multi-agentic-travel-advisor/places_agent/requirements.txt b/multi-agentic-travel-advisor/places_agent/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..390e8a471a15ac115034d11d913826b596f20fd0
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/requirements.txt
@@ -0,0 +1,8 @@
+grpcio==1.38.0
+grpcio-tools==1.38.0
+grpc-interceptor
+protobuf==3.16.0
+multithreading
+openai
+requests
+geopy
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/places_agent/server.py b/multi-agentic-travel-advisor/places_agent/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea59cc718acf1ec9c5f30d2a1d0eb25f39560f08
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/server.py
@@ -0,0 +1,96 @@
+import grpc
+import json
+from concurrent import futures
+
+import agent_pb2
+import agent_pb2_grpc
+
+from tool_calling.tools import get_attractions, get_wiki, tools
+from tool_calling.llm_client import client, deployment_name
+
+
+def run_conversation(prompt: str) -> str:
+    messages = [{"role": "user", "content": prompt}]
+
+    # First API call: Ask model for response or tool invocation
+    response = client.chat.completions.create(
+        model=deployment_name, messages=messages, tools=tools, tool_choice="required"
+    )
+
+    response_message = response.choices[0].message
+    messages.append(response_message)
+
+    print("\nModel's Response:")
+    print(response_message)
+
+    # If the model requests tool execution, handle it
+    if response_message.tool_calls:
+        for tool_call in response_message.tool_calls:
+            function_name = tool_call.function.name
+            function_args = json.loads(tool_call.function.arguments)
+
+            print(f"\nFunction Call: {function_name}")
+            print(f"Arguments: {function_args}")
+
+            if function_name == "get_attractions":
+                function_response = get_attractions(
+                    location=function_args.get("location")
+                )
+            elif function_name == "get_wiki":
+                function_response = get_wiki(topic=function_args.get("topic"))
+            else:
+                function_response = json.dumps({"error": "Unknown function"})
+
+            messages.append(
+                {
+                    "tool_call_id": tool_call.id,
+                    "role": "tool",
+                    "name": function_name,
+                    "content": function_response,
+                }
+            )
+
+    else:
+        print("No tool calls were made by the model.")
+
+    # Second API call: Get final response
+    final_response = client.chat.completions.create(
+        model=deployment_name,
+        messages=messages,
+    )
+
+    return final_response.choices[0].message.content
+
+
+class PlacesAgent(agent_pb2_grpc.AgentServicer):
+    def __init__(self):
+        self.latest_places_info = None
+
+    def evaluateAgentRequestsFromPlanner(self, request_iterator, context):
+        for request in request_iterator:
+            if request.agent_type == agent_pb2.PLACES:
+                print(f"\nReceived query: {request.text}")
+                places_info = run_conversation(request.text)
+
+                if places_info is None:
+                    places_info = "No places info available."
+
+                self.latest_places_info = places_info
+                print(f"Places info set to: {self.latest_places_info}")
+
+                response = agent_pb2.AgentResponse(text=places_info)
+                yield response
+
+
+def serve():
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    agent_pb2_grpc.add_AgentServicer_to_server(PlacesAgent(), server)
+    port = 8061
+    server.add_insecure_port(f"[::]:{port}")
+    print(f"\nPlaces agent server started on port {port}...")
+    server.start()
+    server.wait_for_termination()
+
+
+if __name__ == "__main__":
+    serve()
diff --git a/multi-agentic-travel-advisor/places_agent/tool_calling/llm_client.py b/multi-agentic-travel-advisor/places_agent/tool_calling/llm_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5342b16264873b5880dba4cb9e13711f7e6a2e3
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/tool_calling/llm_client.py
@@ -0,0 +1,28 @@
+from openai import OpenAI
+import os
+from base64 import b64encode
+
+platform = "OpenAI"
+# "Llama-3.3-70B-Instruct"
+# "gpt-4o-mini"
+deployment_name = "gpt-4o-mini"
+
+
+if platform == "GenAI":
+    # Initialize OpenAI client
+    genai_username = os.getenv("IAIS_GENAI_USERNAME")
+    genai_password = os.getenv("IAIS_GENAI_PASSWORD")
+    token_string = f"{genai_username}:{genai_password}"
+    token_bytes = b64encode(token_string.encode())
+
+    client = OpenAI(
+        api_key="xxxx",
+        default_headers={"Authorization": f"Basic {token_bytes.decode()}"},
+        base_url="https://genai.iais.fraunhofer.de/api/v2",
+    )
+
+elif platform == "OpenAI":
+    # The OpenAI() client reads OPENAI_API_KEY from the environment itself.
+    # Re-assigning os.environ from os.getenv() was a no-op when the variable
+    # existed and raised TypeError (None is not a str) when it did not —
+    # fail fast with a clear message instead.
+    if not os.getenv("OPENAI_API_KEY"):
+        raise RuntimeError("OPENAI_API_KEY environment variable is not set.")
+
+    client = OpenAI()
diff --git a/multi-agentic-travel-advisor/places_agent/tool_calling/tools.py b/multi-agentic-travel-advisor/places_agent/tool_calling/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..676b4c36114493f946eac80436cf0d76a29a8fcf
--- /dev/null
+++ b/multi-agentic-travel-advisor/places_agent/tool_calling/tools.py
@@ -0,0 +1,131 @@
+import requests
+import json
+from geopy.geocoders import Nominatim
+
+
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_attractions",
+            "description": "Get nearby tourist attractions for a given location.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "Search for nearby attractions.",
+                    }
+                },
+                "required": ["location"],
+                "additionalProperties": False,
+            },
+        },
+    },
+    {
+        "type": "function",
+        "function": {
+            "name": "get_wiki",
+            "description": "Fetches a summary and metadata for a given Wikipedia topic.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "topic": {
+                        "type": "string",
+                        "description": "The topic to search for on Wikipedia.",
+                    }
+                },
+                "required": ["topic"],
+                "additionalProperties": False,
+            },
+        },
+    },
+]
+
+
+def get_attractions(location: str, radius: int = 500):
+    # Initialize geolocator
+    geolocator = Nominatim(user_agent="your_travel_app")
+
+    # Get latitude and longitude of the location
+    location_data = geolocator.geocode(location)
+    if not location_data:
+        return json.dumps({"error": f"Location '{location}' not found"}, indent=4)
+
+    lat, lon = location_data.latitude, location_data.longitude
+
+    # Construct the Overpass API query for tourism data
+    overpass_url = "http://overpass-api.de/api/interpreter"
+    query = f"""
+    [out:json];
+    (
+        node["tourism"](around:{radius},{lat},{lon});
+        way["tourism"](around:{radius},{lat},{lon});
+        relation["tourism"](around:{radius},{lat},{lon});
+    );
+    out center;
+    """
+
+    # Fetch data from Overpass API
+    try:
+        response = requests.get(overpass_url, params={"data": query})
+        response.raise_for_status()  # Raises an error for bad responses (4xx or 5xx)
+        data = response.json()
+    except requests.exceptions.RequestException as e:
+        return json.dumps({"error": f"Error fetching attractions: {str(e)}"}, indent=4)
+
+    # Process the attraction data
+    attractions = []
+    for element in data.get("elements", []):
+        tags = element.get("tags", {})
+
+        name = (
+            tags.get("name")
+            or tags.get("official_name")
+            or tags.get("brand")
+            or tags.get("operator")
+        )
+        tourism_type = tags.get("tourism", "Unknown Type")
+        latitude = element.get("lat") or element.get("center", {}).get("lat")
+        longitude = element.get("lon") or element.get("center", {}).get("lon")
+
+        if name and latitude and longitude:
+            attraction_info = {
+                "name": name,
+                "type": tourism_type,
+                "latitude": latitude,
+                "longitude": longitude,
+            }
+            attractions.append(attraction_info)
+
+    return json.dumps({"attractions": attractions}, indent=4)
+
+
+def get_wiki(topic: str):
+    topic_for_url = topic.replace(" ", "_")
+    wiki_url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic_for_url}"
+
+    # Fetch data from Wikipedia API
+    try:
+        response = requests.get(wiki_url)
+        response.raise_for_status()  # Raises an error for bad responses (4xx or 5xx)
+        data = response.json()
+    except requests.exceptions.RequestException as e:
+        return json.dumps(
+            {"error": f"Wikipedia page for '{topic}' not found: {str(e)}"}, indent=4
+        )
+
+    # Extract useful information from the response
+    title = data.get("title", topic)
+    summary = data.get("extract", "No summary available")
+    page_url = data.get("content_urls", {}).get("desktop", {}).get("page", "")
+    thumbnail = data.get("thumbnail", {}).get("source", "")
+
+    wiki_info = {
+        "title": title,
+        "summary": summary,
+        "page_url": page_url,
+        "thumbnail": thumbnail,
+    }
+
+    return json.dumps(wiki_info, indent=4)
diff --git a/multi-agentic-travel-advisor/planner/DOCKER_TAG b/multi-agentic-travel-advisor/planner/DOCKER_TAG
new file mode 100644
index 0000000000000000000000000000000000000000..d168727368aea2999ca29b770fcfd2039a94307e
--- /dev/null
+++ b/multi-agentic-travel-advisor/planner/DOCKER_TAG
@@ -0,0 +1 @@
+cicd.ai4eu-dev.eu:7444/tutorials/multi-agent-new/planner-stream
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/planner/Dockerfile b/multi-agentic-travel-advisor/planner/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..f2760fbb8e683d3b47aaec03555921ea36b16ab0
--- /dev/null
+++ b/multi-agentic-travel-advisor/planner/Dockerfile
@@ -0,0 +1,43 @@
+# Use a slim Python base image
+FROM python:3.10-slim
+
+ENV PYTHONUNBUFFERED=1
+
+RUN apt-get update -y && \
+    apt-get install -y --no-install-recommends \
+        build-essential \
+        gcc \
+        libffi-dev \
+        libssl-dev \
+        git \
+        curl \
+        ca-certificates \
+        tzdata \
+        protobuf-compiler && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set timezone
+ARG TIMEZONE=Europe/Berlin
+ENV TZ=$TIMEZONE
+RUN ln -snf /usr/share/zoneinfo/$TIMEZONE /etc/localtime && echo $TIMEZONE > /etc/timezone
+
+# Set working directory
+WORKDIR /chatbot
+
+# Copy requirements and install
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy project files
+COPY . .
+
+# Compile .proto file
+RUN python3 -m grpc_tools.protoc \
+    --proto_path=. \
+    --python_out=. \
+    --grpc_python_out=. \
+    planner.proto
+
+# Run the server
+CMD ["python3", "-u", "server.py"]
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/planner/build_and_deploy.sh b/multi-agentic-travel-advisor/planner/build_and_deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4386fc09fd4063051fd7cb1a34a1facf258546c7
--- /dev/null
+++ b/multi-agentic-travel-advisor/planner/build_and_deploy.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Variables
+IMAGE_NAME="planner-stream"
+REGISTRY_URL="cicd.ai4eu-dev.eu:7444"
+REPO_PATH="tutorials/multi-agent-new"
+TAG="latest"  
+
+# Step 1: Build the Docker image
+echo "Building Docker image..."
+docker build -t $IMAGE_NAME .
+
+# Step 2: Tag the Docker image
+echo "Tagging Docker image..."
+docker tag $IMAGE_NAME $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG
+
+# Step 3: Push the Docker image to the registry
+echo "Pushing Docker image to registry..."
+docker push $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG
+
+# Check if the image was pushed successfully
+if [ $? -eq 0 ]; then
+  echo "Docker image successfully pushed to $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG"
+else
+  echo "Error: Docker push failed!"
+  exit 1
+fi
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/planner/client.py b/multi-agentic-travel-advisor/planner/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7803f462209025d8c843ea95dad02f5bb4d77be
--- /dev/null
+++ b/multi-agentic-travel-advisor/planner/client.py
@@ -0,0 +1,24 @@
+import grpc
+import planner_pb2
+import planner_pb2_grpc
+
+
+def run():
+    # Connect to the gRPC server
+    with grpc.insecure_channel("localhost:8061") as channel:
+        stub = planner_pb2_grpc.PlannerStub(channel)
+
+        # Example user query
+        query = "What's the weather like today?"
+        request = planner_pb2.UserQuery(text=query)
+
+        # planner.proto defines a streaming RPC
+        # (evaluateUserQueriesFromChatbot(stream UserQuery)); there is no
+        # unary ProcessUserQuery on this service.
+        for response in stub.evaluateUserQueriesFromChatbot(iter([request])):
+            # Each response is an AgentRequest carrying the routed query.
+            print(f"Response: {response.text}")
+            print(f"Agent Type: {response.agent_type}")
+
+
+if __name__ == "__main__":
+    run()
diff --git a/multi-agentic-travel-advisor/planner/planner.proto b/multi-agentic-travel-advisor/planner/planner.proto
new file mode 100644
index 0000000000000000000000000000000000000000..1a8fe43ad78b76b9ccb44b739db51c58904c7a1d
--- /dev/null
+++ b/multi-agentic-travel-advisor/planner/planner.proto
@@ -0,0 +1,31 @@
+syntax = "proto3";
+
+message Empty {
+
+}
+
+message UserQuery {
+    string text = 1;
+}
+
+message AgentRequest {
+    string text = 1;
+    AgentType agent_type = 2;
+}
+
+enum AgentType {
+    UNKNOWN = 0;
+    WEATHER = 1;
+    PLACES = 2;
+    FOOD = 3;
+    MAP = 4;
+}
+
+message AgentResponse {
+    string text = 1;
+}
+
+service Planner {
+    rpc evaluateUserQueriesFromChatbot(stream UserQuery) returns (stream AgentRequest);
+    rpc processAgentResponsesFromAgent(stream AgentResponse) returns (stream AgentResponse);
+}
diff --git a/multi-agentic-travel-advisor/planner/requirements.txt b/multi-agentic-travel-advisor/planner/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..14e36e37882ff425467933cd3067c23030751196
--- /dev/null
+++ b/multi-agentic-travel-advisor/planner/requirements.txt
@@ -0,0 +1,9 @@
+grpcio==1.38.0
+grpcio-tools==1.38.0
+grpc-interceptor
+protobuf==3.16.0
+multithreading
+Flask
+
+torch
+transformers
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/planner/server.py b/multi-agentic-travel-advisor/planner/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad1e38807f681cb05b9456d6fed04af5b5b46e11
--- /dev/null
+++ b/multi-agentic-travel-advisor/planner/server.py
@@ -0,0 +1,84 @@
+import grpc
+from concurrent import futures
+import planner_pb2
+import planner_pb2_grpc
+from transformers import pipeline
+import logging
+import time
+
+logger = logging.getLogger(__name__)
+logging.basicConfig(level=logging.INFO)
+
+# Zero-shot classifier
+classifier = pipeline("zero-shot-classification")
+
+
+def match_agent(text):
+    candidate_labels = ["weather", "places", "food", "map"]
+    result = classifier(text, candidate_labels)
+    label = result["labels"][0]
+
+    if label == "weather":
+        return planner_pb2.WEATHER
+    elif label == "places":
+        return planner_pb2.PLACES
+    elif label == "food":
+        return planner_pb2.FOOD
+    elif label == "map":
+        return planner_pb2.MAP
+    else:
+        return planner_pb2.UNKNOWN
+
+
+class PlannerService(planner_pb2_grpc.PlannerServicer):
+    def __init__(self):
+        logger.info("PlannerService initialized.")
+
+    def evaluateUserQueriesFromChatbot(self, request_iterator, context):
+        print("evaluateUserQueriesFromChatbot..............................")
+        for user_query in request_iterator:
+            logger.info(f"Received user query: {user_query.text}")
+            agent_type = match_agent(user_query.text)
+            logger.info(f"Matched agent type: {agent_type}")
+
+            agent_response = planner_pb2.AgentRequest()
+            agent_response.text = user_query.text
+            agent_response.agent_type = agent_type
+
+            if agent_type == planner_pb2.AgentType.WEATHER:
+                logger.info("Handling WEATHER agent.")
+            elif agent_type == planner_pb2.AgentType.PLACES:
+                logger.info("Handling PLACES agent.")
+            elif agent_type == planner_pb2.AgentType.FOOD:
+                logger.info("Handling FOOD agent.")
+            elif agent_type == planner_pb2.AgentType.MAP:
+                logger.info("Handling MAP agent.")
+            else:
+                agent_response.text = "Sorry, I couldn't understand the query."
+
+            yield agent_response
+
+    def processAgentResponsesFromAgent(self, request_iterator, context):
+        logger.info("Receiving AgentResponses from agent...")
+
+        for response in request_iterator:
+            logger.info(f"Received AgentResponse: {response.text}")
+            res = planner_pb2.AgentResponse()
+            res.text = response.text
+            print(res)
+            yield res
+
+        logger.info("Finished processing AgentResponses.")
+
+
+def serve():
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    planner_pb2_grpc.add_PlannerServicer_to_server(PlannerService(), server)
+    server.add_insecure_port("[::]:8061")
+    server.start()
+    logger.info("Planner Service running on port 8061")
+    server.wait_for_termination()
+
+
+if __name__ == "__main__":
+    serve()
diff --git a/multi-agentic-travel-advisor/weather_agent/DOCKER_TAG b/multi-agentic-travel-advisor/weather_agent/DOCKER_TAG
new file mode 100644
index 0000000000000000000000000000000000000000..88791f77849ed9c82f32a870391b7c520355ea6f
--- /dev/null
+++ b/multi-agentic-travel-advisor/weather_agent/DOCKER_TAG
@@ -0,0 +1 @@
+cicd.ai4eu-dev.eu:7444/tutorials/multi-agent-new/weather-agent-s
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/weather_agent/Dockerfile b/multi-agentic-travel-advisor/weather_agent/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..9ff3b8653b92c3a93faaf4d0ad506546a0101369
--- /dev/null
+++ b/multi-agentic-travel-advisor/weather_agent/Dockerfile
@@ -0,0 +1,44 @@
+# Slim Python base image; build-essential/gcc below let pip compile native wheels (grpcio, protobuf)
+FROM python:3.10-slim
+
+ENV PYTHONUNBUFFERED=1
+
+RUN apt-get update -y && \
+    apt-get install -y --no-install-recommends \
+        build-essential \
+        gcc \
+        libffi-dev \
+        libssl-dev \
+        git \
+        curl \
+        ca-certificates \
+        tzdata \
+        protobuf-compiler && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set container timezone (default Europe/Berlin; override with --build-arg TIMEZONE=...)
+ARG TIMEZONE=Europe/Berlin
+ENV TZ=$TIMEZONE
+RUN ln -snf /usr/share/zoneinfo/$TIMEZONE /etc/localtime && echo $TIMEZONE > /etc/timezone
+
+# Set working directory
+WORKDIR /weather-agent
+
+# Install Python dependencies first so this layer is cached across source-only changes
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy project files (after dependency install, to preserve the pip cache layer)
+COPY . .
+
+# Generate agent_pb2.py / agent_pb2_grpc.py from the service definition
+RUN python3 -m grpc_tools.protoc \
+    --proto_path=. \
+    --python_out=. \
+    --grpc_python_out=. \
+    agent.proto
+
+# Run the gRPC server; -u (plus PYTHONUNBUFFERED) streams logs immediately
+CMD ["python3", "-u", "server.py"]
+
diff --git a/multi-agentic-travel-advisor/weather_agent/agent.proto b/multi-agentic-travel-advisor/weather_agent/agent.proto
new file mode 100644
index 0000000000000000000000000000000000000000..92378c237d527ef36d8305b5ae17ff867c3193b1
--- /dev/null
+++ b/multi-agentic-travel-advisor/weather_agent/agent.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+message Empty {
+
+}
+
+message UserQuery {
+    string text = 1;  // raw natural-language query from the user
+}
+
+message AgentRequest {
+    string text = 1;  // query text forwarded by the planner
+    AgentType agent_type = 2;  // which specialist agent should handle the query
+}
+
+enum AgentType {
+    UNKNOWN = 0;  // planner could not classify the query
+    WEATHER = 1;
+    PLACES = 2;
+    FOOD = 3;
+    MAP = 4;
+}
+
+message AgentResponse {
+    string text = 1;  // agent's natural-language answer
+}
+
+service Agent {
+    rpc evaluateAgentRequestsFromPlanner(stream AgentRequest) returns (stream AgentResponse);  // bidirectional stream: planner pushes requests, agent streams answers; this is the ONLY RPC
+}
diff --git a/multi-agentic-travel-advisor/weather_agent/build_and_deploy.sh b/multi-agentic-travel-advisor/weather_agent/build_and_deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..52ee6dfdc9a6a47acb35418b6cf493348f77f290
--- /dev/null
+++ b/multi-agentic-travel-advisor/weather_agent/build_and_deploy.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Variables
+IMAGE_NAME="weather-agent-s"
+REGISTRY_URL="cicd.ai4eu-dev.eu:7444"
+REPO_PATH="tutorials/multi-agent-new"
+TAG="latest"  
+
+# Step 1: Build the Docker image
+echo "Building Docker image..."
+docker build -t $IMAGE_NAME .
+
+# Step 2: Tag the Docker image
+echo "Tagging Docker image..."
+docker tag $IMAGE_NAME $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG
+
+# Step 3: Push the Docker image to the registry
+echo "Pushing Docker image to registry..."
+docker push $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG
+
+# Check if the image was pushed successfully
+if [ $? -eq 0 ]; then
+  echo "Docker image successfully pushed to $REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG"
+else
+  echo "Error: Docker push failed!"
+  exit 1
+fi
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/weather_agent/client.py b/multi-agentic-travel-advisor/weather_agent/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..2037ac24701fcd2e375d9ee289605df1b544a432
--- /dev/null
+++ b/multi-agentic-travel-advisor/weather_agent/client.py
@@ -0,0 +1,17 @@
+import grpc
+import agent_pb2
+import agent_pb2_grpc
+
+
+def run():
+    channel = grpc.insecure_channel("localhost:8061")
+    stub = agent_pb2_grpc.AgentStub(channel)
+    user_query = input("Please enter your query (e.g., 'Is it sunny in Rome?'): ")
+    request = agent_pb2.UserQuery(text=user_query)
+    response = stub.ProcessUserQuery(request)
+    print("Response from agent:", response.text)
+    channel.close()
+
+
+if __name__ == "__main__":
+    run()
diff --git a/multi-agentic-travel-advisor/weather_agent/requirements.txt b/multi-agentic-travel-advisor/weather_agent/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..445cba98401577720be02cfcf62a3abc8d2baf0a
--- /dev/null
+++ b/multi-agentic-travel-advisor/weather_agent/requirements.txt
@@ -0,0 +1,10 @@
+grpcio==1.38.0
+grpcio-tools==1.38.0
+grpc-interceptor
+protobuf==3.16.0
+# NOTE: "multithreading" removed — threading/concurrent.futures are Python stdlib; no pip package is needed
+openai
+requests
+geopy
+timezonefinder
+pytz
\ No newline at end of file
diff --git a/multi-agentic-travel-advisor/weather_agent/server.py b/multi-agentic-travel-advisor/weather_agent/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..82b649186aaf612d52e1123db3cf27eef8a641e2
--- /dev/null
+++ b/multi-agentic-travel-advisor/weather_agent/server.py
@@ -0,0 +1,96 @@
+import grpc
+import json
+from concurrent import futures
+
+import agent_pb2
+import agent_pb2_grpc
+
+from tool_calling.tools import get_location_datetime_weather, tools
+from tool_calling.llm_client import client, deployment_name
+
+# import logging
+# logging.basicConfig(level=logging.DEBUG)
+
+
+def run_conversation(prompt: str) -> str:
+    messages = [{"role": "user", "content": prompt}]  # single-turn conversation seed
+
+    # First API call: the model may answer directly or request a tool call (tool_choice="auto")
+    response = client.chat.completions.create(
+        model=deployment_name, messages=messages, tools=tools, tool_choice="auto"
+    )
+
+    response_message = response.choices[0].message
+    messages.append(response_message)  # keep the assistant turn (incl. tool_calls) in history
+
+    # Execute each requested tool call and append its output as a "tool" message
+    if response_message.tool_calls:
+        for tool_call in response_message.tool_calls:
+            function_name = tool_call.function.name
+            function_args = json.loads(tool_call.function.arguments)  # arguments arrive as a JSON string
+
+            print("------------------TOOL CALLING---------------------------\n")
+            print(f"\nFunction Call: {function_name}")
+            print(f"Arguments: {function_args}")
+            print("------------------TOOL CALLING---------------------------\n")
+
+            if function_name == "get_location_datetime_weather":
+                function_response = get_location_datetime_weather(
+                    location=function_args.get("location")
+                )
+            else:
+                function_response = json.dumps({"error": "Unknown function"})  # defensive: model named an unadvertised tool
+
+            messages.append(
+                {
+                    "tool_call_id": tool_call.id,
+                    "role": "tool",
+                    "name": function_name,
+                    "content": function_response,
+                }
+            )
+    else:
+        print("No tool calls were made by the model.")
+
+    # Second API call: the model composes the final answer from the tool results
+    final_response = client.chat.completions.create(
+        model=deployment_name,
+        messages=messages,
+    )
+
+    final_answer = final_response.choices[0].message.content
+    return final_answer
+
+
+class WeatherAgent(agent_pb2_grpc.AgentServicer):  # serves Agent.evaluateAgentRequestsFromPlanner
+    def __init__(self):
+        self.latest_weather_info = None  # most recent answer, kept only for debugging/printing
+
+    def evaluateAgentRequestsFromPlanner(self, request_iterator, context):  # bidirectional stream
+        for request in request_iterator:
+            if request.agent_type == agent_pb2.WEATHER:  # non-WEATHER requests yield no response at all
+                print(f"\nReceived query: {request.text}")
+                weather_info = run_conversation(request.text)  # LLM + tool call; blocks this stream entry
+
+                if weather_info is None:
+                    weather_info = "No weather info available."
+
+                self.latest_weather_info = weather_info
+                print(f"Weather info set to: {self.latest_weather_info}")
+
+                response = agent_pb2.AgentResponse(text=weather_info)
+                yield response
+
+
+def serve():
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    agent_pb2_grpc.add_AgentServicer_to_server(WeatherAgent(), server)
+    port = 8061
+    server.add_insecure_port(f"[::]:{port}")
+    print(f"Weather agent server started on port {port}...")
+    server.start()
+    server.wait_for_termination()
+
+
+if __name__ == "__main__":
+    serve()
diff --git a/multi-agentic-travel-advisor/weather_agent/tool_calling/llm_client.py b/multi-agentic-travel-advisor/weather_agent/tool_calling/llm_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5342b16264873b5880dba4cb9e13711f7e6a2e3
--- /dev/null
+++ b/multi-agentic-travel-advisor/weather_agent/tool_calling/llm_client.py
@@ -0,0 +1,28 @@
+from openai import OpenAI
+import os
+from base64 import b64encode
+
+platform = "OpenAI"
+# "Llama-3.3-70B-Instruct"
+# "gpt-4o-mini"
+deployment_name = "gpt-4o-mini"
+
+
+if platform == "GenAI":
+    # Initialize OpenAI client
+    genai_username = os.getenv("IAIS_GENAI_USERNAME")
+    genai_password = os.getenv("IAIS_GENAI_PASSWORD")
+    token_string = f"{genai_username}:{genai_password}"
+    token_bytes = b64encode(token_string.encode())
+
+    client = OpenAI(
+        api_key="xxxx",
+        default_headers={"Authorization": f"Basic {token_bytes.decode()}"},
+        base_url="https://genai.iais.fraunhofer.de/api/v2",
+    )
+
+elif platform == "OpenAI":
+    os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
+    # os.environ["OPENAI_ORG_ID"] = os.getenv("OPENAI_ORG_ID")
+
+    client = OpenAI()
diff --git a/multi-agentic-travel-advisor/weather_agent/tool_calling/tools.py b/multi-agentic-travel-advisor/weather_agent/tool_calling/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..f864fdf945cd06fe9ae4725b580101d802d811fc
--- /dev/null
+++ b/multi-agentic-travel-advisor/weather_agent/tool_calling/tools.py
@@ -0,0 +1,106 @@
+import requests
+import json
+from geopy.geocoders import Nominatim
+from datetime import datetime
+from timezonefinder import TimezoneFinder
+import pytz
+from typing import Dict
+
+tools = [  # OpenAI function-calling schema advertised to the model
+    {
+        "type": "function",
+        "function": {
+            "name": "get_location_datetime_weather",
+            "description": "Retrieves the current date, time, time zone, and weather for a given location.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "The name of the location for which to fetch weather data.",
+                    }
+                },
+                "required": ["location"],
+                "additionalProperties": False,
+            },
+        },
+    }
+]
+
+# Initialize once at import time so every request reuses the same geocoder and timezone index
+geolocator = Nominatim(user_agent="location_time_weather_app")
+tz_finder = TimezoneFinder()
+
+
+def get_location_datetime_weather(location: str) -> str:
+    """
+    Retrieves the current date, time, timezone, and weather data for a given location.
+
+    Args:
+        location (str): Name of the location to retrieve data for.
+
+    Returns:
+        str: JSON-formatted string with date, time, timezone, latitude/longitude, and weather.
+    """
+    try:
+        # Geocode the location name to lat/lon via Nominatim (network call)
+        location_data = geolocator.geocode(location)
+        if not location_data:
+            return json.dumps(
+                {"status": "error", "message": f"Location '{location}' not found."},
+                indent=4,
+            )
+
+        latitude = location_data.latitude
+        longitude = location_data.longitude
+
+        # Resolve the IANA timezone name (e.g. "Europe/Rome") from coordinates
+        timezone_str = tz_finder.timezone_at(lat=latitude, lng=longitude)
+        if not timezone_str:
+            return json.dumps(
+                {"status": "error", "message": "Timezone could not be determined."},
+                indent=4,
+            )
+
+        timezone = pytz.timezone(timezone_str)
+        now = datetime.now(timezone)
+
+        # Current weather from the free Open-Meteo forecast API (no API key required)
+        weather_url = (
+            f"https://api.open-meteo.com/v1/forecast"
+            f"?latitude={latitude}&longitude={longitude}&current_weather=true"
+        )
+
+        response = requests.get(weather_url, timeout=10)  # timeout keeps the worker thread from hanging
+        response.raise_for_status()
+        weather_json = response.json()
+
+        current_weather = weather_json.get("current_weather", {})
+        temperature = current_weather.get("temperature", "N/A")
+        wind_speed = current_weather.get("windspeed", "N/A")
+
+        # Assemble the JSON payload returned to the tool-calling LLM
+        result: Dict = {
+            "status": "success",
+            "location": location,
+            "latitude": latitude,
+            "longitude": longitude,
+            "date": now.strftime("%Y-%m-%d"),
+            "time": now.strftime("%H:%M:%S"),
+            "time_zone": timezone_str,
+            "weather": {
+                "temperature": f"{temperature} °C"
+                if isinstance(temperature, (int, float))
+                else "N/A",
+                "wind_speed": f"{wind_speed} km/h"
+                if isinstance(wind_speed, (int, float))
+                else "N/A",
+            },
+        }
+
+        return json.dumps(result, indent=4)
+
+    except Exception as e:  # geopy/requests raise many types; return structured error JSON instead of crashing the tool call
+        return json.dumps(
+            {"status": "error", "message": f"An error occurred: {str(e)}"}, indent=4
+        )