Commit ba220885 authored by Georgios Koukis's avatar Georgios Koukis

statistics added

parent 0f2b22a8
@@ -45,3 +45,15 @@ ADD playbooks/files/acm/* /opt/clusterslice/playbooks/files/acm/
ADD configuration.sh /opt/clusterslice/
ADD basic_functions.sh /opt/clusterslice/
ADD benchmarking.sh /opt/clusterslice/
ADD ansible/ansible.cfg /opt/clusterslice/ansible/
ADD ansible/ansible-jaeger.cfg /opt/clusterslice/ansible/
ADD ansible/callback_plugins/codeco_plugin.py /opt/clusterslice/ansible/callback_plugins/
# Install pip
RUN apt-get update && apt-get install -y python3-pip
# Install the OpenTelemetry Python packages
RUN pip3 install opentelemetry-api opentelemetry-exporter-otlp opentelemetry-sdk
# Install community.general collection in Ansible
RUN ansible-galaxy collection install community.general
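Taken together, these additions wire playbook statistics into the image: the custom callback below collects per-task timings, while the OpenTelemetry packages and the community.general collection are the dependencies of the community.general.opentelemetry callback, which ansible-jaeger.cfg presumably enables for trace export. A minimal sketch of the callback-related settings in the two configuration files; the actual files added by this commit may differ:

# ansible.cfg -- local statistics only
[defaults]
callback_plugins = /opt/clusterslice/ansible/callback_plugins
callbacks_enabled = codeco_plugin

# ansible-jaeger.cfg -- statistics plus OTLP trace export
[defaults]
callback_plugins = /opt/clusterslice/ansible/callback_plugins
callbacks_enabled = codeco_plugin, community.general.opentelemetry

(callbacks_enabled is the current name of the older callback_whitelist setting.) The new callback plugin, ansible/callback_plugins/codeco_plugin.py, follows.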
import time
import os
from datetime import timedelta

from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):
    """
    Custom callback plugin that measures per-task execution time and
    tracks task status (SUCCESS, FAILED, SKIPPED) per host.
    """
    CALLBACK_VERSION = 2.0  # the v2_* hooks below require the v2 callback API
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'codeco_plugin'

    def __init__(self):
        super(CallbackModule, self).__init__()
        self.task_start_time = None
        self.task_stats = {}
        self.processed_tasks = set()  # (task, host) pairs already recorded, to avoid duplicates
        self.experiment_name = os.getenv('EXPERIMENT_NAME', '')
        self.node_name = os.getenv('NODE_NAME', '')
        self.cluster_number = os.getenv('CLUSTER_NUMBER', '')
        self.logfile = f'/opt/clusterslice/results/{self.experiment_name}/{self.node_name}_{self.cluster_number}_output.txt'
        self._ensure_log_directory()

    def _ensure_log_directory(self):
        # Create the results directory if it does not exist yet
        log_dir = os.path.dirname(self.logfile)
        os.makedirs(log_dir, exist_ok=True)

    def _write_to_logfile(self, content):
        # Append one line to the logfile, line-buffered
        with open(self.logfile, 'a', buffering=1) as log_file:
            log_file.write(content + '\n')

    def v2_playbook_on_task_start(self, task, is_conditional):
        # Record the task start time without logging anything yet
        self.task_start_time = time.time()
        # self._display.display(f"Task '{task.get_name().strip()}' is being executed.", color='cyan')

    def _record_result(self, result, status):
        # Record a (task, host) result exactly once; repeated callbacks for
        # the same task on the same host are ignored.
        task_name = result.task_name
        task_host = result._host.get_name()  # include the host for uniqueness
        task_key = f"{task_name}_{task_host}"
        if task_key not in self.processed_tasks:
            task_duration = self._get_task_duration()
            self._log_task_result(task_name, status, task_duration, task_host)
            self.processed_tasks.add(task_key)  # mark task as processed

    def v2_runner_on_ok(self, result):
        self._record_result(result, "SUCCESS")

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self._record_result(result, "FAILED")

    def v2_runner_on_skipped(self, result):
        self._record_result(result, "SKIPPED")

    def v2_playbook_on_stats(self, stats):
        # Display the summary of all task execution times and persist it
        self._display.display("Playbook execution complete.", color='blue')
        for (host, task_name), info in self.task_stats.items():
            status = info["status"]
            duration = str(timedelta(seconds=info["duration"]))
            self._display.display(f"Task '{task_name}': {status} - Duration: {duration}",
                                  color='green' if status == 'SUCCESS' else 'red')
            # Keep statistics in the log file, one comma-separated line per task
            self._write_to_logfile(f"{host}, {task_name}, {status}, {duration}")

    def _get_task_duration(self):
        """
        Helper function to calculate the duration of the current task.
        """
        if self.task_start_time is None:
            return 0.0
        return time.time() - self.task_start_time

    def _log_task_result(self, task_name, status, duration, host):
        """
        Helper function to store and display the result and duration of a task.
        """
        # Key by (host, task_name) so results from different hosts do not overwrite each other
        self.task_stats[(host, task_name)] = {
            "status": status,
            "duration": duration,
        }
        self._display.display(f"Task '{task_name}' {status}. Duration: {timedelta(seconds=duration)}",
                              color='green' if status == 'SUCCESS' else 'red')
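A minimal usage sketch for the plugin, assuming the environment variables it reads are exported before the run (the experiment, node, and playbook names below are illustrative):

export EXPERIMENT_NAME=exp01
export NODE_NAME=master01
export CLUSTER_NUMBER=3
ansible-playbook -i inventory deploy.yml

Each finished task is echoed to the console as it completes and, once the playbook ends, appended as one "host, task name, STATUS, H:MM:SS" line to /opt/clusterslice/results/exp01/master01_3_output.txt. The commit also includes a small shell script that counts cluster deployments: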
#!/bin/bash
# This script counts the number of cluster deployments
# Define the file name
FILE="cluster-deployment"
# Check if the file exists
if [ ! -f "$FILE" ]; then
# If it doesn't exist, create the file and set initial count to 0
echo 0 > "$FILE"
fi
# Read the current count from the file
count=$(cat "$FILE")
# Increment the count by 1
count=$((count + 1))
# Save the updated count back to the file
echo "$count" > "$FILE"
# Print the current count to the console
echo "Deploying cluster number $count."
# Expose the count in a variable for later use (visible to the caller only if this script is sourced)
cluster_number=$count
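Since the plugin reads CLUSTER_NUMBER from the environment, a natural follow-up is to source this script and export the counter before invoking the playbook. A sketch, with a hypothetical file name for the counter script:

. ./count_deployments.sh   # hypothetical name; sourcing keeps cluster_number in the caller's shell
export CLUSTER_NUMBER="$cluster_number"

Sourcing rather than executing matters here: cluster_number is a plain shell variable, so it only survives in the calling shell if the script runs in that shell's context.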