Skip to content
Snippets Groups Projects
Commit 471315a7 authored by Swetha Lakshmana Murthy's avatar Swetha Lakshmana Murthy
Browse files

New tutorial for agentic workflow

parent 291e322a
No related branches found
No related tags found
No related merge requests found
Showing
with 791 additions and 0 deletions
cicd.ai4eu-dev.eu:7444/tutorials/multi-agent-new/places-agent-s
\ No newline at end of file
# Use a slim Python base image
FROM python:3.10-slim

# Unbuffered stdout/stderr so container logs appear immediately
ENV PYTHONUNBUFFERED=1

# Build deps for Python wheels plus protoc for gRPC code generation;
# clean apt caches in the same layer to keep the image small
RUN apt-get update -y && \
    apt-get install -y --no-install-recommends \
    build-essential \
    gcc \
    libffi-dev \
    libssl-dev \
    git \
    curl \
    ca-certificates \
    tzdata \
    protobuf-compiler && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Set timezone
ARG TIMEZONE=Europe/Berlin
ENV TZ=$TIMEZONE
RUN ln -snf /usr/share/zoneinfo/$TIMEZONE /etc/localtime && echo $TIMEZONE > /etc/timezone

# Set working directory
WORKDIR /places-agent

# Copy requirements and install
# (done before copying sources so the dependency layer caches across code changes)
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy project files
COPY . .

# Compile .proto file
# Regenerates agent_pb2*.py at build time, overwriting any stale checked-in copies
RUN python3 -m grpc_tools.protoc \
    --proto_path=. \
    --python_out=. \
    --grpc_python_out=. \
    agent.proto

# Run the server
CMD ["python3", "-u", "server.py"]
// Shared message/service definitions for the worker agents in the
// multi-agent tutorial (the places agent implements this service).
syntax = "proto3";

// Placeholder message; not referenced by the Agent service below.
message Empty {
}

// Raw user query text.
message UserQuery {
  string text = 1;
}

// A query routed by the planner, tagged with the agent that should handle it.
message AgentRequest {
  string text = 1;
  AgentType agent_type = 2;
}

// The kinds of agents the planner can route to.
enum AgentType {
  UNKNOWN = 0;
  WEATHER = 1;
  PLACES = 2;
  FOOD = 3;
  MAP = 4;
}

// An agent's textual answer.
message AgentResponse {
  string text = 1;
}

// Bidirectional-streaming service: the planner streams AgentRequests and the
// agent streams back AgentResponses.
service Agent {
  rpc evaluateAgentRequestsFromPlanner(stream AgentRequest) returns (stream AgentResponse);
}
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# NO CHECKED-IN PROTOBUF GENCODE
# source: agent.proto
# Protobuf Python Version: 5.29.0
# NOTE(review): this checked-in copy is stale relative to agent.proto in this
# commit — its serialized descriptor declares AgentResponse.response_text, enum
# values MAP=2/NEWS=3/FINANCE=4, and a unary GetAgentResponse RPC, whereas the
# .proto defines AgentResponse.text, PLACES=2/FOOD=3/MAP=4, and the streaming
# evaluateAgentRequestsFromPlanner RPC. The Dockerfile regenerates this file at
# image build time, so the stale copy only matters for local runs.
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import runtime_version as _runtime_version
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder

# Refuses to import under a protobuf runtime older than the generator (5.29.0).
_runtime_version.ValidateProtobufRuntimeVersion(
    _runtime_version.Domain.PUBLIC, 5, 29, 0, "", "agent.proto"
)
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

# The compiled FileDescriptorProto for agent.proto, embedded as bytes.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x0b\x61gent.proto"\x19\n\tUserQuery\x12\x0c\n\x04text\x18\x01 \x01(\t"<\n\x0c\x41gentRequest\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\x1e\n\nagent_type\x18\x02 \x01(\x0e\x32\n.AgentType"&\n\rAgentResponse\x12\x15\n\rresponse_text\x18\x01 \x01(\t*E\n\tAgentType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07WEATHER\x10\x01\x12\x07\n\x03MAP\x10\x02\x12\x08\n\x04NEWS\x10\x03\x12\x0b\n\x07\x46INANCE\x10\x04\x32:\n\x05\x41gent\x12\x31\n\x10GetAgentResponse\x12\r.AgentRequest\x1a\x0e.AgentResponseb\x06proto3'
)

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "agent_pb2", _globals)
# Byte offsets of each message/enum within the serialized descriptor above
# (only needed for the pure-Python descriptor implementation).
if not _descriptor._USE_C_DESCRIPTORS:
    DESCRIPTOR._loaded_options = None
    _globals["_AGENTTYPE"]._serialized_start = 144
    _globals["_AGENTTYPE"]._serialized_end = 213
    _globals["_USERQUERY"]._serialized_start = 15
    _globals["_USERQUERY"]._serialized_end = 40
    _globals["_AGENTREQUEST"]._serialized_start = 42
    _globals["_AGENTREQUEST"]._serialized_end = 102
    _globals["_AGENTRESPONSE"]._serialized_start = 104
    _globals["_AGENTRESPONSE"]._serialized_end = 142
    _globals["_AGENT"]._serialized_start = 215
    _globals["_AGENT"]._serialized_end = 273
# @@protoc_insertion_point(module_scope)
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
# NOTE(review): generated file — regenerated inside the Docker image by
# grpc_tools.protoc at build time, so this checked-in copy is only used locally.
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import warnings

import agent_pb2 as agent__pb2

# Version guard emitted by grpcio-tools: this generated code requires at least
# grpcio 1.70.0 at runtime.
# NOTE(review): requirements.txt pins grpcio==1.38.0, which this guard would
# reject at import time — verify the pins (or rely on the Dockerfile's protoc
# step to regenerate matching code).
GRPC_GENERATED_VERSION = "1.70.0"
GRPC_VERSION = grpc.__version__
_version_not_supported = False

try:
    from grpc._utilities import first_version_is_lower

    _version_not_supported = first_version_is_lower(
        GRPC_VERSION, GRPC_GENERATED_VERSION
    )
except ImportError:
    # Very old grpcio without the helper — treat as unsupported.
    _version_not_supported = True

if _version_not_supported:
    raise RuntimeError(
        f"The grpc package installed is at version {GRPC_VERSION},"
        + f" but the generated code in agent_pb2_grpc.py depends on"
        + f" grpcio>={GRPC_GENERATED_VERSION}."
        + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
        + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
    )
class AgentStub(object):
    """Client-side stub for the Agent service (unary GetAgentResponse RPC)."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.GetAgentResponse = channel.unary_unary(
            "/Agent/GetAgentResponse",
            request_serializer=agent__pb2.AgentRequest.SerializeToString,
            response_deserializer=agent__pb2.AgentResponse.FromString,
            _registered_method=True,
        )
class AgentServicer(object):
    """Server-side interface for the Agent service; subclass and override."""

    def GetAgentResponse(self, request, context):
        """Default handler: reports UNIMPLEMENTED until overridden by a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_AgentServicer_to_server(servicer, server):
    """Register *servicer*'s handlers for the Agent service on *server*."""
    rpc_method_handlers = {
        "GetAgentResponse": grpc.unary_unary_rpc_method_handler(
            servicer.GetAgentResponse,
            request_deserializer=agent__pb2.AgentRequest.FromString,
            response_serializer=agent__pb2.AgentResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler("Agent", rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
    # Registered method handlers enable the faster "registered method" call path.
    server.add_registered_method_handlers("Agent", rpc_method_handlers)
# This class is part of an EXPERIMENTAL API.
class Agent(object):
    """Experimental static helpers for calling the Agent service without a stub."""

    @staticmethod
    def GetAgentResponse(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Invoke /Agent/GetAgentResponse once against *target* and return the response."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/Agent/GetAgentResponse",
            agent__pb2.AgentRequest.SerializeToString,
            agent__pb2.AgentResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )
#!/bin/bash
# Build, tag, and push the places-agent image to the AI4EU registry.
#
# Bug fix: without fail-fast behavior, a failed `docker build` or `docker tag`
# still ran the subsequent steps and could push a stale image. Variables are
# now quoted so unexpected characters cannot word-split the commands.
set -euo pipefail

# Variables
IMAGE_NAME="places-agent-s"
REGISTRY_URL="cicd.ai4eu-dev.eu:7444"
REPO_PATH="tutorials/multi-agent-new"
TAG="latest"
FULL_IMAGE="$REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG"

# Step 1: Build the Docker image
echo "Building Docker image..."
docker build -t "$IMAGE_NAME" .

# Step 2: Tag the Docker image
echo "Tagging Docker image..."
docker tag "$IMAGE_NAME" "$FULL_IMAGE"

# Step 3: Push the Docker image to the registry
echo "Pushing Docker image to registry..."
if docker push "$FULL_IMAGE"; then
    echo "Docker image successfully pushed to $FULL_IMAGE"
else
    echo "Error: Docker push failed!"
    exit 1
fi
\ No newline at end of file
import grpc

import agent_pb2
import agent_pb2_grpc


def run():
    """Send one places query to the agent server and print the streamed answers.

    Bug fix: the original called ``stub.ProcessUserQuery``, which does not exist
    on ``AgentStub`` and raised AttributeError at runtime. The Agent service
    exposes the bidirectional-streaming RPC ``evaluateAgentRequestsFromPlanner``
    (stream AgentRequest -> stream AgentResponse), and the server only answers
    requests whose agent_type is PLACES.
    """
    channel = grpc.insecure_channel("localhost:8061")
    try:
        stub = agent_pb2_grpc.AgentStub(channel)
        user_query = input("Please enter your query (e.g., 'Is it sunny in Rome?'): ")

        def request_stream():
            # Single-item request stream; extend to yield more queries as needed.
            yield agent_pb2.AgentRequest(text=user_query, agent_type=agent_pb2.PLACES)

        for response in stub.evaluateAgentRequestsFromPlanner(request_stream()):
            print("Response from agent:", response.text)
    finally:
        # Always release the channel, even if the RPC fails.
        channel.close()


if __name__ == "__main__":
    run()
# NOTE(review): the checked-in agent_pb2_grpc.py was generated by grpcio-tools
# 1.70.0 and rejects grpcio<1.70 at import time — verify these pins, or rely on
# the Dockerfile's protoc step to regenerate matching code at build time.
grpcio==1.38.0
grpcio-tools==1.38.0
grpc-interceptor
protobuf==3.16.0
# NOTE(review): removed "multithreading" — it is not a usable PyPI dependency;
# Python's threading support is in the standard library.
openai
requests
geopy
\ No newline at end of file
import grpc
import json
from concurrent import futures
import agent_pb2
import agent_pb2_grpc
from tool_calling.tools import get_attractions, get_wiki, tools
from tool_calling.llm_client import client, deployment_name
def run_conversation(prompt: str) -> str:
    """Answer *prompt* via a two-step OpenAI tool-calling conversation.

    Step 1 asks the model for tool invocations (tool_choice="required" forces
    at least one tool call, so the `else` branch below should rarely trigger).
    Each requested tool is executed locally and its JSON result is appended to
    the conversation. Step 2 asks the model for the final grounded answer.

    Args:
        prompt: The user's query text.

    Returns:
        The model's final message content (may be None if the model returns
        an empty message — callers should handle that).
    """
    messages = [{"role": "user", "content": prompt}]
    # First API call: Ask model for response or tool invocation
    response = client.chat.completions.create(
        model=deployment_name, messages=messages, tools=tools, tool_choice="required"
    )
    response_message = response.choices[0].message
    # The assistant message (including its tool_calls) must be echoed back into
    # the history before the tool results are appended.
    messages.append(response_message)
    print("\nModel's Response:")
    print(response_message)
    # If the model requests tool execution, handle it
    if response_message.tool_calls:
        for tool_call in response_message.tool_calls:
            function_name = tool_call.function.name
            # Arguments arrive as a JSON string chosen by the model.
            function_args = json.loads(tool_call.function.arguments)
            print(f"\nFunction Call: {function_name}")
            print(f"Arguments: {function_args}")
            # Dispatch to the matching local tool; both tools return JSON strings.
            if function_name == "get_attractions":
                function_response = get_attractions(
                    location=function_args.get("location")
                )
            elif function_name == "get_wiki":
                function_response = get_wiki(topic=function_args.get("topic"))
            else:
                function_response = json.dumps({"error": "Unknown function"})
            # Feed each tool result back so the model can ground its final answer.
            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": function_name,
                    "content": function_response,
                }
            )
    else:
        print("No tool calls were made by the model.")
    # Second API call: Get final response
    final_response = client.chat.completions.create(
        model=deployment_name,
        messages=messages,
    )
    return final_response.choices[0].message.content
class PlacesAgent(agent_pb2_grpc.AgentServicer):
    """gRPC servicer answering PLACES-typed requests streamed from the planner.

    Requests with any other agent_type are ignored (no response is yielded),
    so multiple agents can share the same request stream contract.
    """

    def __init__(self):
        # Last answer produced, kept for debugging/inspection.
        self.latest_places_info = None

    def evaluateAgentRequestsFromPlanner(self, request_iterator, context):
        """Yield one AgentResponse per incoming PLACES AgentRequest.

        Bug fix: the fallback/log strings said "weather" — a copy-paste from
        the weather agent; this is the places agent.
        """
        for request in request_iterator:
            if request.agent_type == agent_pb2.PLACES:
                print(f"\nReceived query: {request.text}")
                places_info = run_conversation(request.text)
                if places_info is None:
                    places_info = "No places info available."
                self.latest_places_info = places_info
                print(f"Places info set to: {self.latest_places_info}")
                yield agent_pb2.AgentResponse(text=places_info)
def serve():
    """Start the places-agent gRPC server on port 8061 and block until terminated."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    agent_pb2_grpc.add_AgentServicer_to_server(PlacesAgent(), server)
    port = 8061
    server.add_insecure_port(f"[::]:{port}")
    # Bug fix: the original printed "\Places..." — "\P" is an invalid escape
    # sequence (DeprecationWarning, printed literally); "\n" was intended.
    print(f"\nPlaces agent server started on port {port}...")
    server.start()
    server.wait_for_termination()


if __name__ == "__main__":
    serve()
from openai import OpenAI
import os
from base64 import b64encode

# Which backend to talk to: "OpenAI" (api.openai.com) or "GenAI"
# (Fraunhofer IAIS gateway with HTTP Basic auth).
platform = "OpenAI"

# Model/deployment to use, e.g.:
# "Llama-3.3-70B-Instruct"
# "gpt-4o-mini"
deployment_name = "gpt-4o-mini"

if platform == "GenAI":
    # The GenAI gateway authenticates via a Basic auth header built from
    # username/password env vars; the api_key below is a required placeholder.
    genai_username = os.getenv("IAIS_GENAI_USERNAME")
    genai_password = os.getenv("IAIS_GENAI_PASSWORD")
    if not genai_username or not genai_password:
        raise RuntimeError(
            "IAIS_GENAI_USERNAME and IAIS_GENAI_PASSWORD must be set for the GenAI platform."
        )
    token_string = f"{genai_username}:{genai_password}"
    token_bytes = b64encode(token_string.encode())
    client = OpenAI(
        api_key="xxxx",  # placeholder; real auth is the Basic header below
        default_headers={"Authorization": f"Basic {token_bytes.decode()}"},
        base_url="https://genai.iais.fraunhofer.de/api/v2",
    )
elif platform == "OpenAI":
    # Bug fix: the original did os.environ["OPENAI_API_KEY"] = os.getenv(...),
    # which raises a confusing TypeError when the variable is unset. OpenAI()
    # reads the env var itself, so just fail early with a clear message.
    if not os.getenv("OPENAI_API_KEY"):
        raise RuntimeError("OPENAI_API_KEY environment variable is not set.")
    # os.environ["OPENAI_ORG_ID"] = os.getenv("OPENAI_ORG_ID")
    client = OpenAI()
import requests
import json
from geopy.geocoders import Nominatim
# OpenAI function-calling schemas advertising the local tools to the model.
# Each "name" must match a Python function defined below in this module,
# which run_conversation dispatches to when the model requests it.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_attractions",
            "description": "Get nearby tourist attractions for a given location.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "Search for nearby attractions.",
                    }
                },
                "required": ["location"],
                "additionalProperties": False,
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "get_wiki",
            "description": "Fetches a summary and metadata for a given Wikipedia topic.",
            "parameters": {
                "type": "object",
                "properties": {
                    "topic": {
                        "type": "string",
                        "description": "The topic to search for on Wikipedia.",
                    }
                },
                "required": ["topic"],
                "additionalProperties": False,
            },
        },
    },
]
def get_attractions(location: str, radius: int = 500):
    """Return nearby tourist attractions for *location* as a JSON string.

    Geocodes the location with Nominatim, then queries the Overpass API for
    OSM elements tagged "tourism" within *radius* metres.

    Args:
        location: Free-text place name to geocode (e.g. "Rome").
        radius: Search radius in metres around the geocoded point.

    Returns:
        JSON string with an "attractions" list (name/type/latitude/longitude),
        or a JSON object with an "error" key on failure.
    """
    # Initialize geolocator
    geolocator = Nominatim(user_agent="your_travel_app")
    # Get latitude and longitude of the location
    location_data = geolocator.geocode(location)
    if not location_data:
        return json.dumps({"error": f"Location '{location}' not found"}, indent=4)
    lat, lon = location_data.latitude, location_data.longitude
    # Construct the Overpass API query for tourism data
    overpass_url = "http://overpass-api.de/api/interpreter"
    query = f"""
    [out:json];
    (
    node["tourism"](around:{radius},{lat},{lon});
    way["tourism"](around:{radius},{lat},{lon});
    relation["tourism"](around:{radius},{lat},{lon});
    );
    out center;
    """
    # Fetch data from Overpass API
    try:
        # Bug fix: added a timeout so a hung Overpass server cannot block the
        # agent forever (requests has no default timeout).
        response = requests.get(overpass_url, params={"data": query}, timeout=30)
        response.raise_for_status()  # Raises an error for bad responses (4xx or 5xx)
        data = response.json()
    except requests.exceptions.RequestException as e:
        return json.dumps({"error": f"Error fetching attractions: {str(e)}"}, indent=4)
    # Process the attraction data
    attractions = []
    for element in data.get("elements", []):
        tags = element.get("tags", {})
        name = (
            tags.get("name")
            or tags.get("official_name")
            or tags.get("brand")
            or tags.get("operator")
        )
        tourism_type = tags.get("tourism", "Unknown Type")
        # Bug fix: compare against None instead of relying on truthiness, so
        # coordinates of exactly 0.0 (equator / prime meridian) are not dropped
        # or wrongly redirected to the "center" fallback used for ways/relations.
        latitude = element.get("lat")
        if latitude is None:
            latitude = element.get("center", {}).get("lat")
        longitude = element.get("lon")
        if longitude is None:
            longitude = element.get("center", {}).get("lon")
        if name and latitude is not None and longitude is not None:
            attractions.append(
                {
                    "name": name,
                    "type": tourism_type,
                    "latitude": latitude,
                    "longitude": longitude,
                }
            )
    return json.dumps({"attractions": attractions}, indent=4)
def get_wiki(topic: str):
    """Fetch a summary and metadata for *topic* from the Wikipedia REST API.

    Args:
        topic: The topic to look up; spaces are converted to underscores to
            form the page title in the URL.

    Returns:
        JSON string with title/summary/page_url/thumbnail, or a JSON object
        with an "error" key when the page cannot be fetched.
    """
    topic_for_url = topic.replace(" ", "_")
    wiki_url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic_for_url}"
    # Fetch data from Wikipedia API
    try:
        # Bug fix: added a timeout so a slow/hung endpoint cannot block the
        # agent forever (requests has no default timeout).
        response = requests.get(wiki_url, timeout=15)
        response.raise_for_status()  # Raises an error for bad responses (4xx or 5xx)
        data = response.json()
    except requests.exceptions.RequestException as e:
        return json.dumps(
            {"error": f"Wikipedia page for '{topic}' not found: {str(e)}"}, indent=4
        )
    # Extract useful information from the response
    title = data.get("title", topic)
    summary = data.get("extract", "No summary available")
    page_url = data.get("content_urls", {}).get("desktop", {}).get("page", "")
    thumbnail = data.get("thumbnail", {}).get("source", "")
    wiki_info = {
        "title": title,
        "summary": summary,
        "page_url": page_url,
        "thumbnail": thumbnail,
    }
    return json.dumps(wiki_info, indent=4)
cicd.ai4eu-dev.eu:7444/tutorials/multi-agent-new/planner-stream
\ No newline at end of file
# Use a slim Python base image
FROM python:3.10-slim

# Unbuffered stdout/stderr so container logs appear immediately
ENV PYTHONUNBUFFERED=1

# Build deps for Python wheels plus protoc for gRPC code generation;
# clean apt caches in the same layer to keep the image small
RUN apt-get update -y && \
    apt-get install -y --no-install-recommends \
    build-essential \
    gcc \
    libffi-dev \
    libssl-dev \
    git \
    curl \
    ca-certificates \
    tzdata \
    protobuf-compiler && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Set timezone
ARG TIMEZONE=Europe/Berlin
ENV TZ=$TIMEZONE
RUN ln -snf /usr/share/zoneinfo/$TIMEZONE /etc/localtime && echo $TIMEZONE > /etc/timezone

# Set working directory
WORKDIR /chatbot

# Copy requirements and install
# (done before copying sources so the dependency layer caches across code changes)
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy project files
COPY . .

# Compile .proto file
# Regenerates planner_pb2*.py at build time, overwriting any stale checked-in copies
RUN python3 -m grpc_tools.protoc \
    --proto_path=. \
    --python_out=. \
    --grpc_python_out=. \
    planner.proto

# Run the server
CMD ["python3", "-u", "server.py"]
\ No newline at end of file
#!/bin/bash
# Build, tag, and push the planner-stream image to the AI4EU registry.
#
# Bug fix: without fail-fast behavior, a failed `docker build` or `docker tag`
# still ran the subsequent steps and could push a stale image. Variables are
# now quoted so unexpected characters cannot word-split the commands.
set -euo pipefail

# Variables
IMAGE_NAME="planner-stream"
REGISTRY_URL="cicd.ai4eu-dev.eu:7444"
REPO_PATH="tutorials/multi-agent-new"
TAG="latest"
FULL_IMAGE="$REGISTRY_URL/$REPO_PATH/$IMAGE_NAME:$TAG"

# Step 1: Build the Docker image
echo "Building Docker image..."
docker build -t "$IMAGE_NAME" .

# Step 2: Tag the Docker image
echo "Tagging Docker image..."
docker tag "$IMAGE_NAME" "$FULL_IMAGE"

# Step 3: Push the Docker image to the registry
echo "Pushing Docker image to registry..."
if docker push "$FULL_IMAGE"; then
    echo "Docker image successfully pushed to $FULL_IMAGE"
else
    echo "Error: Docker push failed!"
    exit 1
fi
\ No newline at end of file
import grpc

import planner_pb2
import planner_pb2_grpc


def run():
    """Send one user query to the planner and print the routed agent request(s).

    Bug fix: the original called ``stub.ProcessUserQuery``, which does not exist
    on ``PlannerStub`` and raised AttributeError at runtime. The Planner service
    exposes the bidirectional-streaming RPC ``evaluateUserQueriesFromChatbot``
    (stream UserQuery -> stream AgentRequest).
    """
    # Connect to the gRPC server
    with grpc.insecure_channel("localhost:8061") as channel:
        stub = planner_pb2_grpc.PlannerStub(channel)
        # Example user query
        query = "What's the weather like today?"

        def request_stream():
            # Single-item request stream; extend to yield more queries as needed.
            yield planner_pb2.UserQuery(text=query)

        # Each response is an AgentRequest carrying the text and routed agent type.
        for response in stub.evaluateUserQueriesFromChatbot(request_stream()):
            print(f"Response: {response.text}")
            print(f"Agent Type: {response.agent_type}")


if __name__ == "__main__":
    run()
// Message/service definitions for the planner, which routes chatbot queries
// to the appropriate worker agent and relays agent responses back.
syntax = "proto3";

// Placeholder message; not referenced by the Planner service below.
message Empty {
}

// Raw user query text from the chatbot.
message UserQuery {
  string text = 1;
}

// A query tagged with the agent type that should handle it.
message AgentRequest {
  string text = 1;
  AgentType agent_type = 2;
}

// The kinds of agents the planner can route to.
enum AgentType {
  UNKNOWN = 0;
  WEATHER = 1;
  PLACES = 2;
  FOOD = 3;
  MAP = 4;
}

// An agent's textual answer.
message AgentResponse {
  string text = 1;
}

// Both RPCs are bidirectional-streaming: queries in / routed requests out,
// and agent responses in / relayed responses out.
service Planner {
  rpc evaluateUserQueriesFromChatbot(stream UserQuery) returns (stream AgentRequest);
  rpc processAgentResponsesFromAgent(stream AgentResponse) returns (stream AgentResponse);
}
grpcio==1.38.0
grpcio-tools==1.38.0
grpc-interceptor
protobuf==3.16.0
# NOTE(review): removed "multithreading" — it is not a usable PyPI dependency;
# Python's threading support is in the standard library.
Flask
torch
transformers
\ No newline at end of file
import grpc
from concurrent import futures
import planner_pb2
import planner_pb2_grpc
from transformers import pipeline
import logging
import time

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Zero-shot classifier
# Loaded once at import time; downloads the default NLI model on first use,
# so server startup can take a while on a cold cache.
classifier = pipeline("zero-shot-classification")
def match_agent(text):
    """Classify *text* into one of the planner's agent types via zero-shot NLI.

    Returns the planner_pb2 AgentType value for the top-scoring label, or
    UNKNOWN if the label is unrecognized.
    """
    candidate_labels = ["weather", "places", "food", "map"]
    top_label = classifier(text, candidate_labels)["labels"][0]
    label_to_agent = {
        "weather": planner_pb2.WEATHER,
        "places": planner_pb2.PLACES,
        "food": planner_pb2.FOOD,
        "map": planner_pb2.MAP,
    }
    return label_to_agent.get(top_label, planner_pb2.UNKNOWN)
class PlannerService(planner_pb2_grpc.PlannerServicer):
    """gRPC servicer that routes user queries to agents and relays agent responses."""

    def __init__(self):
        logger.info("PlannerService initialized.")

    def evaluateUserQueriesFromChatbot(self, request_iterator, context):
        """Yield one AgentRequest per streamed UserQuery, routed via match_agent."""
        print("evaluateUserQueriesFromChatbot..............................")
        # Log line emitted for each recognized agent type.
        routing_logs = {
            planner_pb2.AgentType.WEATHER: "Handling WEATHER agent.",
            planner_pb2.AgentType.PLACES: "Handling PLACES agent.",
            planner_pb2.AgentType.FOOD: "Handling FOOD agent.",
            planner_pb2.AgentType.MAP: "Handling MAP agent.",
        }
        for user_query in request_iterator:
            logger.info(f"Received user query: {user_query.text}")
            matched_type = match_agent(user_query.text)
            logger.info(f"Matched agent type: {matched_type}")
            outgoing = planner_pb2.AgentRequest(
                text=user_query.text, agent_type=matched_type
            )
            if matched_type in routing_logs:
                logger.info(routing_logs[matched_type])
            else:
                # Unrecognized query: replace the text with an apology for the caller.
                outgoing.text = "Sorry, I couldn't understand the query."
            yield outgoing

    def processAgentResponsesFromAgent(self, request_iterator, context):
        """Echo each streamed AgentResponse back downstream, logging as it goes."""
        logger.info("Receiving AgentResponses from agent...")
        for incoming in request_iterator:
            logger.info(f"Received AgentResponse: {incoming.text}")
            relayed = planner_pb2.AgentResponse(text=incoming.text)
            print(relayed)
            yield relayed
        logger.info("Finished processing AgentResponses.")
def serve():
    """Start the planner gRPC server on port 8061 and block until terminated."""
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    planner_pb2_grpc.add_PlannerServicer_to_server(PlannerService(), grpc_server)
    grpc_server.add_insecure_port("[::]:8061")
    grpc_server.start()
    logger.info("Planner Service running on port 8061")
    grpc_server.wait_for_termination()


if __name__ == "__main__":
    serve()
cicd.ai4eu-dev.eu:7444/tutorials/multi-agent-new/weather-agent-s
\ No newline at end of file
# Use a slim Python base image
FROM python:3.10-slim

# Unbuffered stdout/stderr so container logs appear immediately
ENV PYTHONUNBUFFERED=1

# Build deps for Python wheels plus protoc for gRPC code generation;
# clean apt caches in the same layer to keep the image small
RUN apt-get update -y && \
    apt-get install -y --no-install-recommends \
    build-essential \
    gcc \
    libffi-dev \
    libssl-dev \
    git \
    curl \
    ca-certificates \
    tzdata \
    protobuf-compiler && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Set timezone
ARG TIMEZONE=Europe/Berlin
ENV TZ=$TIMEZONE
RUN ln -snf /usr/share/zoneinfo/$TIMEZONE /etc/localtime && echo $TIMEZONE > /etc/timezone

# Set working directory
WORKDIR /weather-agent

# Copy requirements and install
# (done before copying sources so the dependency layer caches across code changes)
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy project files
COPY . .

# Compile .proto file
# Regenerates agent_pb2*.py at build time, overwriting any stale checked-in copies
RUN python3 -m grpc_tools.protoc \
    --proto_path=. \
    --python_out=. \
    --grpc_python_out=. \
    agent.proto

# Run the server
CMD ["python3", "-u", "server.py"]
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment