From 49d75ba6b70d23895db240934f2fd2ce1418e0ba Mon Sep 17 00:00:00 2001
From: zlovarp <arpad.lovassy@semcon.com>
Date: Wed, 7 Mar 2018 13:53:59 +0100
Subject: [PATCH] Removed unused Python scripts

Signed-off-by: zlovarp <arpad.lovassy@semcon.com>
---
 common/version.py                          |   66 -
 etc/autotest/.titan_builder-esekilxxen1843 |   39 -
 etc/autotest/.titan_builder-esekits1064    |   30 -
 etc/autotest/.titan_builder-esekits3013    |   33 -
 etc/autotest/.titan_builder-esekits5013    |   39 -
 etc/autotest/.titan_builder-rhea           |   26 -
 etc/autotest/.titan_builder-tcclab1        |   23 -
 etc/autotest/README                        |  286 -----
 etc/autotest/freshbuild.sh                 |   24 -
 etc/autotest/mountall.sh                   |   20 -
 etc/autotest/product_handler.py            |  287 -----
 etc/autotest/titan_builder.py              | 1131 -----------------
 etc/autotest/titan_builder.sh              |   24 -
 etc/autotest/titan_builder_cfg.py          |  793 ------------
 etc/autotest/titan_publisher.py            | 1291 --------------------
 etc/autotest/web/titan_builder.css         |   46 -
 etc/autotest/web/titan_builder.php         |  228 ----
 etc/scripts/cfg_msg_maker.py               |   35 -
 etc/scripts/tpd_graph_xml2dot.py           |   28 -
 19 files changed, 4449 deletions(-)
 delete mode 100644 common/version.py
 delete mode 100644 etc/autotest/.titan_builder-esekilxxen1843
 delete mode 100644 etc/autotest/.titan_builder-esekits1064
 delete mode 100644 etc/autotest/.titan_builder-esekits3013
 delete mode 100644 etc/autotest/.titan_builder-esekits5013
 delete mode 100644 etc/autotest/.titan_builder-rhea
 delete mode 100644 etc/autotest/.titan_builder-tcclab1
 delete mode 100644 etc/autotest/README
 delete mode 100755 etc/autotest/freshbuild.sh
 delete mode 100755 etc/autotest/mountall.sh
 delete mode 100755 etc/autotest/product_handler.py
 delete mode 100755 etc/autotest/titan_builder.py
 delete mode 100755 etc/autotest/titan_builder.sh
 delete mode 100755 etc/autotest/titan_builder_cfg.py
 delete mode 100755 etc/autotest/titan_publisher.py
 delete mode 100644 etc/autotest/web/titan_builder.css
 delete mode 100644 etc/autotest/web/titan_builder.php
 delete mode 100644 etc/scripts/cfg_msg_maker.py
 delete mode 100644 etc/scripts/tpd_graph_xml2dot.py

diff --git a/common/version.py b/common/version.py
deleted file mode 100644
index 707b3e042..000000000
--- a/common/version.py
+++ /dev/null
@@ -1,66 +0,0 @@
-##############################################################################
-# Copyright (c) 2000-2017 Ericsson Telecom AB
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Contributors:
-#   Balasko, Jeno
-#   Pandi, Krisztian
-#
-##############################################################################
-import datetime
- 
-from tempfile import mkstemp
-from shutil import move
-from os import remove, close
-
-from subprocess import call
-from sys import exit
-
-def getnumberfromfileline(line):
-    lineasstring = str(line);
-    numberfromfile = lineasstring[-3:];
-    number = int(numberfromfile) + 1;
-    print number;
-    if number >= 99:
-	print 'Number is over the limit: >=99. File is not modified!'
-        exit();
-    return number
-    
-
-def replace(file_path):
-    #Create temp file
-    fh, abs_path = mkstemp()
-    new_file = open(abs_path,'w')
-    old_file = open(file_path)
-    for line in old_file:
-		if '#define TTCN3_PATCHLEVEL' in line:
-				newline = str('#define TTCN3_PATCHLEVEL ') + str(getnumberfromfileline(line)) + str('\n');
-				new_file.write(newline);
-		elif '#define TTCN3_VERSION 30' in line:
-		                number = getnumberfromfileline(line);
-				if number <= 9:
-					newline = str('#define TTCN3_VERSION 302') +'0' + str(number) + str('\n');
-				else:
-					newline = str('#define TTCN3_VERSION 302') + str(number) + str('\n');
-				new_file.write(newline);
-		else:
-			new_file.write(line)
-    #close temp file
-    new_file.close()
-    close(fh)
-    old_file.close()
-    #Remove original file
-    remove(file_path)
-    #Move new file
-    move(abs_path, file_path)
- 
-#( d.isoweekday() in range(1, 6)
-#d = datetime.datetime.now();
-#if d.isoweekday() == 2 or d.isoweekday() == 4 :
-replace ("version.h");  
-#call(["git", "commit", "-m 'TTCN3_PATCHLEVEL update'" ,"version.h"]);
-#	call(["git", "push"]);
-
diff --git a/etc/autotest/.titan_builder-esekilxxen1843 b/etc/autotest/.titan_builder-esekilxxen1843
deleted file mode 100644
index 9d18c41ac..000000000
--- a/etc/autotest/.titan_builder-esekilxxen1843
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash -x
-
-# Basic configuration for `esekilxxen1843' (147.214.13.100).
-
-SS7_HOME=/mnt/common_store/tools/EIN/linux64
-EIN_DIR=${SS7_HOME}
-OSS_DIR=/mnt/TTCN/Tools/OSS/linux-glibc2.3-amd64/8.5.0
-SEA_DIR=/mnt/common_store/tools/SEA/linux64
-GLIB_DIR=/usr
-SCTP_DIR=/usr
-
-if [ x`uname -s` = "xSunOS" ]; then
-PATH=/proj/TTCN/Tools/libxml2-2.7.8/bin:/app/expect/5.42/bin:/proj/TTCN/Tools/grep-2.9/bin:/proj/TTCN/Tools/perl-5.10.1/bin:/proj/TTCN/Tools/tar-1.25/bin:/proj/TTCN/Tools/make-3.82/bin:/usr/bin:/bin:${PATH}
-LD_LIBRARY_PATH=${SS7_HOME}/lib64:${SEA_DIR}/lib:${OSS_DIR}/lib:/usr/lib64:${LD_LIBRARY_PATH}
-else
-PATH=/app/libxml/2.7.7/LMWP2/bin:/usr/bin:/bin:${PATH}
-LD_LIBRARY_PATH=/app/libxml/2.7.7/LMWP2/lib:${SS7_HOME}/lib64:${SEA_DIR}/lib:${OSS_DIR}/lib:/usr/lib64:${LD_LIBRARY_PATH}
-fi
-
-TTCN3_LICENSE_FILE=/home/titanrt/license_8706.dat
-CVSROOT=:ext:titanrt@esekilx5008.rnd.ki.sw.ericsson.se:/proj/TTCN/cvs_root
-CVS_SERVER=/usr/bin/cvs
-CVS_RSH=ssh
-EDITOR=mcedit
-MODULEPATH=/app/TITAN/modules:/proj/TTCN/modules:/app/modules/0/modulefiles:/home/titanrt/.afs/0/rmodules:/home/titanrt/.afs/0/pmodules:/env/seki/modules:/home/titanrt/.afs/0/imodules:/env/common/modules
-JAVA_HOME=/app/jdk/1.6.0_30/LMWP3
-
-# For HUB access.
-TCCLAB1_IP=159.107.194.73
-TCCLAB2_IP=159.107.194.76
-TCCLAB3_IP=159.107.194.74
-TCCLAB5_IP=159.107.194.75
-
-export JAVA_HOME TTCN3_LICENSE_FILE CVSROOT CVS_SERVER CVS_RSH EDITOR OSS_DIR SEA_DIR PATH LD_LIBRARY_PATH EIN_DIR SS7_HOME TCCLAB1_IP TCCLAB2_IP TCCLAB3_IP TCCLAB5_IP MODULEPATH
-
-eval `/app/modules/0/bin/modulecmd bash add cvs`
-eval `/app/modules/0/bin/modulecmd bash add expect`
-eval `/app/modules/0/bin/modulecmd bash add TCCENV`
-eval `/app/modules/0/bin/modulecmd bash add libxml`
diff --git a/etc/autotest/.titan_builder-esekits1064 b/etc/autotest/.titan_builder-esekits1064
deleted file mode 100644
index 42d2a987a..000000000
--- a/etc/autotest/.titan_builder-esekits1064
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash -x
-
-# Basic configuration for `esekits1064' (147.214.15.101).
-
-SS7_HOME=/mnt/common_store/tools/EIN/linux64
-EIN_DIR=${SS7_HOME}
-OSS_DIR=/mnt/TTCN/Tools/OSS/linux-glibc2.3-amd64/8.5.0
-SEA_DIR=/mnt/common_store/tools/SEA/linux64
-GLIB_DIR=/usr
-SCTP_DIR=/usr
-
-PATH=/usr/bin:/bin:${PATH}
-LD_LIBRARY_PATH=${SS7_HOME}/lib64:${SEA_DIR}/lib:${OSS_DIR}/lib:/usr/lib64:${LD_LIBRARY_PATH}
-
-TTCN3_LICENSE_FILE=${HOME}/license_8706.dat
-CVSROOT=:ext:titanrt@esekilx5008.rnd.ki.sw.ericsson.se:/proj/TTCN/cvs_root
-CVS_SERVER=/usr/bin/cvs
-CVS_RSH=ssh
-EDITOR=mcedit
-MODULEPATH=/app/TITAN/modules:/proj/TTCN/modules:/app/modules/0/modulefiles:/home/titanrt/.afs/0/rmodules:/home/titanrt/.afs/0/pmodules:/env/seki/modules:/home/titanrt/.afs/0/imodules:/env/common/modules
-
-# For HUB access.
-TCCLAB1_IP=159.107.194.73
-TCCLAB2_IP=159.107.194.76
-TCCLAB3_IP=159.107.194.74
-TCCLAB5_IP=159.107.194.75
-
-export TTCN3_LICENSE_FILE CVSROOT CVS_SERVER CVS_RSH EDITOR OSS_DIR SEA_DIR PATH LD_LIBRARY_PATH EIN_DIR SS7_HOME TCCLAB1_IP TCCLAB2_IP TCCLAB3_IP TCCLAB5_IP MODULEPATH
-
-/app/modules/3.2.8/bin/modulecmd bash add cvs
diff --git a/etc/autotest/.titan_builder-esekits3013 b/etc/autotest/.titan_builder-esekits3013
deleted file mode 100644
index f541d6f7f..000000000
--- a/etc/autotest/.titan_builder-esekits3013
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash -x
-
-# Basic configuration for `esekits3013' (147.214.15.96).
-
-SS7_HOME=/mnt/common_store/tools/EIN/linux64
-EIN_DIR=${SS7_HOME}
-OSS_DIR=/mnt/TTCN/Tools/OSS/linux-glibc2.3-amd64/8.5.0
-SEA_DIR=/mnt/common_store/tools/SEA/linux64
-GLIB_DIR=/usr
-SCTP_DIR=/usr
-
-if [ x`uname -s` = "xSunOS" ]; then
-PATH=/proj/TTCN/Tools/libxml2-2.7.8/bin:/app/expect/5.42/bin:/proj/TTCN/Tools/grep-2.9/bin:/proj/TTCN/Tools/perl-5.10.1/bin:/proj/TTCN/Tools/tar-1.25/bin:/proj/TTCN/Tools/make-3.82/bin:/usr/bin:/bin:${PATH}
-fi
-LD_LIBRARY_PATH=${SS7_HOME}/lib64:${SEA_DIR}/lib:${OSS_DIR}/lib:/usr/lib64:${LD_LIBRARY_PATH}
-
-TTCN3_LICENSE_FILE=/home/titanrt/license_8706.dat
-CVSROOT=:ext:titanrt@esekilx5008.rnd.ki.sw.ericsson.se:/proj/TTCN/cvs_root
-CVS_SERVER=/usr/bin/cvs
-CVS_RSH=ssh
-EDITOR=mcedit
-MODULEPATH=/app/TITAN/modules:/proj/TTCN/modules:/app/modules/0/modulefiles:/home/titanrt/.afs/0/rmodules:/home/titanrt/.afs/0/pmodules:/env/seki/modules:/home/titanrt/.afs/0/imodules:/env/common/modules
-
-# For HUB access.
-TCCLAB1_IP=159.107.194.73
-TCCLAB2_IP=159.107.194.76
-TCCLAB3_IP=159.107.194.74
-TCCLAB5_IP=159.107.194.75
-
-export TTCN3_LICENSE_FILE CVSROOT CVS_SERVER CVS_RSH EDITOR OSS_DIR SEA_DIR PATH LD_LIBRARY_PATH EIN_DIR SS7_HOME TCCLAB1_IP TCCLAB2_IP TCCLAB3_IP TCCLAB5_IP MODULEPATH
-
-/app/modules/3.2.8/bin/modulecmd bash add cvs
-/app/modules/3.2.8/bin/modulecmd bash add expect
diff --git a/etc/autotest/.titan_builder-esekits5013 b/etc/autotest/.titan_builder-esekits5013
deleted file mode 100644
index 73244f5eb..000000000
--- a/etc/autotest/.titan_builder-esekits5013
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash -x
-
-# Basic configuration for `esekits5013' (147.214.15.153).
-
-SS7_HOME=/mnt/common_store/tools/EIN/linux64
-EIN_DIR=${SS7_HOME}
-OSS_DIR=/mnt/TTCN/Tools/OSS/linux-glibc2.3-amd64/8.5.0
-SEA_DIR=/mnt/common_store/tools/SEA/linux64
-GLIB_DIR=/usr
-SCTP_DIR=/usr
-
-if [ x`uname -s` = "xSunOS" ]; then
-PATH=/proj/TTCN/Tools/libxml2-2.7.8/bin:/app/expect/5.42/bin:/proj/TTCN/Tools/grep-2.9/bin:/proj/TTCN/Tools/perl-5.10.1/bin:/proj/TTCN/Tools/tar-1.25/bin:/proj/TTCN/Tools/make-3.82/bin:/usr/bin:/bin:${PATH}
-LD_LIBRARY_PATH=${SS7_HOME}/lib64:${SEA_DIR}/lib:${OSS_DIR}/lib:/usr/lib64:${LD_LIBRARY_PATH}
-else
-PATH=/app/libxml/2.7.7/LMWP2/bin:/usr/bin:/bin:${PATH}
-LD_LIBRARY_PATH=/app/libxml/2.7.7/LMWP2/lib:${SS7_HOME}/lib64:${SEA_DIR}/lib:${OSS_DIR}/lib:/usr/lib64:${LD_LIBRARY_PATH}
-fi
-
-TTCN3_LICENSE_FILE=/home/titanrt/license_8706.dat
-CVSROOT=:ext:titanrt@esekilx5008.rnd.ki.sw.ericsson.se:/proj/TTCN/cvs_root
-CVS_SERVER=/usr/bin/cvs
-CVS_RSH=ssh
-EDITOR=mcedit
-MODULEPATH=/app/TITAN/modules:/proj/TTCN/modules:/app/modules/0/modulefiles:/home/titanrt/.afs/0/rmodules:/home/titanrt/.afs/0/pmodules:/env/seki/modules:/home/titanrt/.afs/0/imodules:/env/common/modules
-JAVA_HOME=/app/jdk/1.6.0_30/LMWP3
-
-# For HUB access.
-TCCLAB1_IP=159.107.194.73
-TCCLAB2_IP=159.107.194.76
-TCCLAB3_IP=159.107.194.74
-TCCLAB5_IP=159.107.194.75
-
-export JAVA_HOME TTCN3_LICENSE_FILE CVSROOT CVS_SERVER CVS_RSH EDITOR OSS_DIR SEA_DIR PATH LD_LIBRARY_PATH EIN_DIR SS7_HOME TCCLAB1_IP TCCLAB2_IP TCCLAB3_IP TCCLAB5_IP MODULEPATH
-
-eval `/app/modules/0/bin/modulecmd bash add cvs`
-eval `/app/modules/0/bin/modulecmd bash add expect`
-eval `/app/modules/0/bin/modulecmd bash add TCCENV`
-eval `/app/modules/0/bin/modulecmd bash add libxml`
diff --git a/etc/autotest/.titan_builder-rhea b/etc/autotest/.titan_builder-rhea
deleted file mode 100644
index 63863fd49..000000000
--- a/etc/autotest/.titan_builder-rhea
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash -x
-
-# Basic configuration for `rhea' (159.107.193.33).
-
-SS7_HOME=/view/eferkov_tcc/vobs/ttcn/TCC_Common/Other/EINSS7/19089-lzy2143405_1_R1C/EABss7019
-EIN_DIR=${SS7_HOME}
-OPENSSL_DIR=/mnt/TTCN/Tools/openssl-0.9.8k
-OSS_DIR=/mnt/TTCN/Tools/oss/ossasn1/solaris-2.4/latest
-OSSXINFO=/mnt/TTCN/Tools/oss/xsdasn1/solaris-2.4/latest
-TENUX_DIR=/view/eferkov_tcc/vobs/ttcn/TCC_Common/Other/tenux
-# It will not work, unless a view is active...
-SEA_DIR=/view/eferkov_tcc/vobs/ttcn/TCC_Common/Other/SEA_LATEST
-SCTP_DIR=/view/eferkov_tcc/vobs/ttcn/TCC_Common/Other/SCTP/sctplib-1.0.1/sctplib/sctp/.libs
-GLIB_DIR=/usr/local/lib
-
-PATH=/mnt/TTCN/Tools/libxml2-2.7.1/bin:${SEA_DIR}/bin:${OSSXINFO}/bin:${OSS_DIR}/bin:/mnt/TTCN/Tools/tar-1.18/bin:/mnt/TTCN/Tools/gcc-4.1.2-sol8/bin:/home/cfccadm/bin/public:usr/atria/bin:/usr/local/gnu/bin:/usr/local/bin:/usr/bin:/bin:/mnt/TTCN/Tools/makedepend-R6.6/bin:${PATH}
-LD_LIBRARY_PATH=${SS7_HOME}/lib:/mnt/TTCN/Tools/openssl-0.9.8k/lib:/mnt/TTCN/Tools/libxml2-2.7.1/lib:${TENUX_DIR}/lib:${SEA_DIR}/lib:${OSSXINFO}/lib:${OSS_DIR}/lib:/mnt/TTCN/Tools/gcc-3.4.6-sol8/lib:/usr/atria/bin:/usr/local/gnu/lib:/usr/local/lib:/usr/lib:/lib:/mnt/TTCN/Tools/makedepend-R6.6/lib:${LD_LIBRARY_PATH}
-
-TTCN3_LICENSE_FILE=${HOME}/license_8706.dat
-CVSROOT=esekits1064.rnd.ki.sw.ericsson.se:/proj/TTCN/cvs_root
-CVS_SERVER=/usr/local/bin/cvs
-CVS_RSH=ssh
-EDITOR=mcedit
-
-export PATH LD_LIBRARY_PATH TTCN3_LICENSE_FILE CVSROOT CVS_SERVER CVS_RSH EDITOR OSS_DIR OPENSSL_DIR OSSXINFO TENUX_DIR SEA_DIR EIN_DIR SS7_HOME SCTP_DIR GLIB_DIR
-
diff --git a/etc/autotest/.titan_builder-tcclab1 b/etc/autotest/.titan_builder-tcclab1
deleted file mode 100644
index a16ca50da..000000000
--- a/etc/autotest/.titan_builder-tcclab1
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash -x
-
-# Basic configuration for `tcclab1' (172.31.21.7).
-
-SS7_HOME=/mnt/common_store/tools/EIN/linux64
-EIN_DIR=${SS7_HOME}
-OSS_DIR=/mnt/TTCN/Tools/OSS/linux-glibc2.3-amd64/8.5.0
-SEA_DIR=/mnt/common_store/tools/SEA/linux64
-GLIB_DIR=/usr
-SCTP_DIR=/usr
-
-PATH=${HOME}/apps/gnuplot-4.4.0/bin:${OSS_DIR}/bin:${PATH}
-LD_LIBRARY_PATH=${HOME}/apps/gnuplot-4.4.0/lib:${SS7_HOME}/lib64:${SEA_DIR}/lib:${OSS_DIR}/lib:/usr/lib64:${LD_LIBRARY_PATH}
-
-TTCN3_LICENSE_FILE=${HOME}/license_8706.dat
-CVSROOT=esekits1064.rnd.ki.sw.ericsson.se:/proj/TTCN/cvs_root
-CVS_RSH=${CVS_RSH=ssh}
-CVS_SERVER=/usr/bin/cvs
-EDITOR=mcedit
-
-export TTCN3_LICENSE_FILE CVSROOT CVS_SERVER CVS_RSH EDITOR OSS_DIR SEA_DIR PATH LD_LIBRARY_PATH EIN_DIR SS7_HOME
-
-ulimit -c unlimited
diff --git a/etc/autotest/README b/etc/autotest/README
deleted file mode 100644
index 9e276459e..000000000
--- a/etc/autotest/README
+++ /dev/null
@@ -1,286 +0,0 @@
-List of files
--------------
-
-- titan_builder.sh: Wrapper script for titan_builder.py.  It sets some basic
-                    environment variables for platforms with tools installed
-                    to non-standard locations.
-- titan_builder.py: Build process control functions.  It shouldn't be run
-                    directly, use the wrapper script instead.
-- titan_builder_cfg.py: The configuration file.  It contains the build
-                        configurations, build slaves etc.
-- product_handler.py: Helper functions to build the VOB products.
-- titan_publisher.py: Different publishing related functions.
-- utils.py: General purpose functions.
-
-Brief overview
---------------
-
-When titan_builder.sh is started, it checks out a build configuration specific
-version of TITAN from CVS.  TITAN gets configured according to the build
-configuration (e.g. variables in Makefile.cfg).  When it's ready, TITAN is
-distributed to each slave attached to the given build configuration.  The
-builds are performed sequentially.  All script files are distributed as well
-and the script starts executing in slave mode on the slave machine.  The master
-and the slave can be the same machines.  In this case, using different build
-directories for them is strongly recommended.  When all passes are finished on
-the slave machine the master starts the build on the next slave machine etc.,
-then the remaining build configurations are built.
-
-After all that, the master collects all logs from the slave machines.  The
-slaves can produce three kinds of outputs: CSV, TXT, HTML.  The CSV file is
-essential, since the master will consider each slave lost without such a file.
-The CSV file is used to generate the e-mail notification message as well.
-Finally, the master publishes the logs and cleans things up.  The compiled
-sources will remain available on each slave until the next build.
-
-To run VOB product tests the VOB or a similar directory structure of products
-should be available on the master machine.  SSHFS can be used for this purpose.
-The performance test package is not part of the official TITAN test suite,
-hence it needs to be distributed for the slaves as well.  Currently it's just
-an archive reachable on the master machine.  It's recommended to use
-configspec 129 and the TCC_Common VOB.
-
-All shell commands are executed using the `run_cmd()' function.  It has a
-timeout parameter to kill the command.
-
-To make sure that the latest version of the scripts always runs, one can use
-a simple script that checks out the sources from CVS first:
-
-#!/bin/bash
-
-rm -rf *.{py,sh} TTCNv3/etc/autotest
-cvs co TTCNv3/etc/autotest && ln -sf TTCNv3/etc/autotest/*.{py,sh} .
-./titan_builder.sh -c tests_on_x86_64_linux,tests_on_solaris,tests_on_i386_solaris,limited_tests_on_ubuntu910 1>/dev/null
-
-Prerequisites
--------------
-
-- Python >= 2.3
-- Configured password-less SSH connections between the master and the slave
-- For scheduled execution use `cron'
-
-Command line options
---------------------
-
--c [c1,c2,...]: Select the build configurations to run.  If no build
-                configuration is selected, then all configurations will be
-                run.  It's a comma separated list.
--d: Print all the available build configurations present in the configuration
-    file.  Slaves attached to a given build configuration are listed as well.
--h: Display available options.
--s: Start the script in slave mode.  This option should only be used by
-    titan_builder.py, unless everything is in place.
-
-Configuration
--------------
-
-It's a Python source file, so take care of the syntax.
-
-- Common options
-
-    builddir:STR The build directory of the master.
-    logdir:STR  The logs of the master go here.
-    htmldir:STR All HTML files will be published here.
-    vob:STR     The VOB products will be copied from here.
-    archive:INT Archive the logs after a specified number of days.
-    cleanup:INT Move the archived logs somewhere else after a specified number of days.
-    measureperiod:INT Reset scores after a given number of days.
-    cleanupslave:{slave:STR,dir:STR} The destination of abandoned logs.
-
-- Recipients
-
-    {name:STR,address:STR}
-
-    The names and e-mail addresses of the recipients.
-
-- Slaves
-
-    slaves[name:STR]:{ip:STR,user:STR,configs:LIST}
-
-    The IP address, the user's name, the build configurations attached to the
-    slave.  Currently one build configuration is supported for each slave.
-
-- Product descriptions
-
-    products[kind:STR]:{name:STR,action0:BOOL,action1:BOOL,...}
-
-    `kind' is always a directory under the VOB's root directory (e.g.
-    TestPorts, ProtocolModules).  `name' is the product name.  Actions can be
-    enabled individually.  The predefined actions are the following:
-
-    - semantic: `compiler -s'
-    - translate: `compiler'
-    - compile: `make'
-    - run: `make run'
-
-- Build configurations
-
-    version:STR        Version of TITAN to use.  It can be a CVS tag or date.
-                       If it's not set the HEAD will be taken.
-    license:STR        Location of the license file.
-    gui:BOOL           The `GUI' part in Makefile.cfg.
-    jni:BOOL           The `JNI' part in Makefile.cfg.
-    debug:BOOL         The `DEBUG' part in Makefile.cfg.
-    compilerflags:STR  The `COMPILERFLAGS' in Makefile.cfg.
-    ldflags:STR        The `LDFLAGS' in Makefile.cfg.
-    gccdir:STR         This will affect `CC' and `CXX'.
-    *cc:STR            Value of `CC' in synch with the previous option.
-    *cxx:STR           Value of `CXX' in synch with the previous option.
-    flex:STR           Replace `FLEX'.
-    perl:STR           Location of the `perl' interpreter.
-    bison:STR          Replace `BISON'.
-    jdkdir:STR         Replace `JDKDIR'.
-    qtdir:STR          Replace `QTDIR'.
-    xmldir:STR         Replace `XMLDIR'.
-    openssldir:STR     Replace `OPENSSL_DIR'.
-    regtest:BOOL       Run regression tests.
-    perftest:BOOL      Run performance tests.  The location of the testsuite
-                       must be known, since it's not part of CVS.  It should
-                       be available for the master locally.
-    perftestdir:STR    Location of the performance tests.
-    *cpsmin:INT        Minimum CPS value for performance tests.
-    *cpsmax:INT        Maximum CPS value for performance tests.
-    functest:BOOL      Run function tests.
-    vobtest:BOOL       Run product tests.
-    *vobtest_logs:BOOL Save logs for product tests.
-    rt2:BOOL           Run tests with both run-times.
-    builddir:STR       Everything will be done here.  It should be different
-                       from the master's.
-    logdir:STR         Place of the logs.
-    *pdfdir:STR        Local directory to copy .pdf files from.  If not present
-                       no *.pdf files will be there.  If it's an empty string
-                       the *.pdf files will be faked with empty files.
-    *xsdtests:BOOL     Disable regression tests for `xsd2ttcn'.  It's very time
-                       consuming.
-    measure:BOOL       Enable `quality' measurements.
-
-(`*' is for optional fields.)
-
-Platform specific settings
---------------------------
-
-If some essential tools (e.g. GCC) are installed to non-standard locations on a
-given platform, all of these environmental settings can be placed into a
-configuration file `${HOME}/.titan_builder'.  This file is always sourced
-first, before starting the actual build.  An example file:
-
-#!/bin/bash
-
-# Basic configuration for `bangjohansen' (172.31.21.76).
-
-PATH=${HOME}/apps/bin:/mnt/TTCN/Tools/gcc-3.4.6-sol10/bin:/mnt/TTCN/Tools/binutils-2.17-sol10/bin:/usr/local/bin:/usr/bin:${PATH}
-LD_LIBRARY_PATH=${HOME}/apps/lib:/mnt/TTCN/Tools/gcc-3.4.6-sol10/lib:/mnt/TTCN/Tools/binutils-2.17-sol10/lib:/usr/local/lib:/usr/lib:${LD_LIBRARY_PATH}
-
-TTCN3_LICENSE_FILE=${HOME}/.TTCN3/license_2536.dat
-CVSROOT=esekits1064.rnd.ki.sw.ericsson.se:/proj/TTCN/cvs_root
-CVS_SERVER=/usr/local/bin/cvs
-CVS_RSH=ssh
-EDITOR=mcedit
-
-export PATH LD_LIBRARY_PATH CVSROOT CVS_SERVER EDITOR TTCN3_LICENSE_FILE CVS_RSH
-
-When something goes wrong
--------------------------
-
-E.g. no e-mail message is received, garbage or missing sections in the e-mail
-or on the generated HTML page...
-
-- Possible reasons and possible solutions
-
-  - Problems with e-mail system: In case of Postfix check the `mailq'
-    command.  All messages still in the queue should be printed.  An example
-    output of the `mailq' command:
-
-    -Queue ID- --Size-- ----Arrival Time---- -Sender/Recipient-------
-    5A57D160B02     1696 Fri Oct 16 09:32:05  ferenc.kovacs@ericsson.com
-    (host mwux020.eth.ericsson.se[159.107.148.18] said: 452 4.4.5
-    Insufficient disk space; try again later (in reply to MAIL FROM command))
-    ferenc.kovacs@ericsson.com
-
-    To clear all the messages: `postsuper -d ALL'.
-
-  - Exception in the Python code: Check the titan_builder.err-log file of the
-    master and the slaves.  All exceptions should go into this file.  The
-    other log file titan_builder.log can be useful as well.
-
-  - Disk quota exceeded.
-
-  - The SSHFS mounted partitions (e.g. used for HTML publishing) get umounted.
-    The common cause is a segfault:
-
-    Dec 12 07:00:50 tcclab1 kernel: sshfs[20289]: segfault at 6507a ip
-    7fbf99315064 sp 44288ea8 error 4 in libc-2.5.so[7fbf992a2000+139000]
-
-    Check /var/log/messages for details.
-
-    If SSHFS locks up and the mounted partitions cannot be umounted like:
-
-    titanrt@tcclab1:~> fusermount -u ~/public_html
-    fusermount: failed to unmount /home/titanrt/public_html: Device or resource busy
-
-    Simply `killall -9 sshfs' and try to remount the partitions.  If SSHFS
-    locks up you may not be able to execute any command in the parent
-    directory of these mount points.  The commands will simply hang.  If it
-    doesn't work, try the following:
-
-    titanrt@tcclab1:~> lsof | grep public_html
-    lsof: WARNING: can't stat() fuse file system /home/titanrt/public_html
-    Output information may be incomplete.
-    bash 2378  titanrt cwd unknown /home/titanrt/public_html/titan_builds (stat: Transport endpoint is not connected)
-    ssh  13480 titanrt cwd unknown /home/titanrt/public_html/titan_builds (stat: Transport endpoint is not connected)
-    titanrt@tcclab1:~> kill -9 2378
-
-    Unfortunately SSHFS is very unstable and hangs regularly, at least the 1.7
-    version used on the master machine, which was released in 2006.  Another
-    thing that may help:
-
-    titanrt@tcclab1:~> killall -9 sshfs 
-    tcclab1:/home/titanrt/titan_nightly_builds # umount -l vobs/ttcn
-    tcclab1:/home/titanrt/titan_nightly_builds # umount -l vobs
-
-  - It's possible that some kind of network problem prevents the nightly tests
-    from checking out the latest script files. If there are broken links in the
-    test directory and the test results are missing, this might be the case. To
-    make the script run the next morning, freshbuild.sh needs to be run first,
-    since freshbuild.sh is run by cron.
-
-Licenses
---------
-
-The script can be run by anyone.  Currently, `titanrt' is used for this
-purpose; however, the actual user can be anyone, since it is set from
-the environment.  It is a general user available on most of our supported
-platforms and can be created manually if needed.  To eliminate user dependence,
-all non-absolute paths in the configuration file are prefixed automatically
-with the current user's home directory.
-
-To run the tests on all of the supported platforms we're using `hostid' based
-license files.  The license file numbers at the moment:
-
-  tcclab1 (172.31.21.7, 0x67666473): 4812
-  tcclab2 (172.31.21.49, 0x67666473): 4812
-  rhea (159.107.193.33, 0x83dbd963): 5628
-  bangjohansen (172.31.21.76, 0x380f076e): 2551
-  
-Statistics generation
----------------------
-
-Please note that the statistics automatically generated by the test system
-are useless unless they're monitored continuously.  Invalid failures will not
-be taken into account by the test system.  The statistics are extracted from
-the `report.txt' files.  Only the `Reg. tests', `Func. tests' and `Perf. tests'
-are important.  Weekends are never counted, but holidays need to be handled by
-hand.
-
-Rules:
-
-All of them pass  -> 2 points
-Any of them fails -> 0 points
-Any of them lost  -> 1 point
-
-Sample output:
-
-First period: 2011-03-01 03:01:00
-Overall score: 90/100 (90%) Commitment 
-This period: 2011-06-01 03:01:00
-Period score: 9/10 (90%) Commitment
diff --git a/etc/autotest/freshbuild.sh b/etc/autotest/freshbuild.sh
deleted file mode 100755
index cc469d8ad..000000000
--- a/etc/autotest/freshbuild.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-###############################################################################
-# Copyright (c) 2000-2017 Ericsson Telecom AB
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Contributors:
-#   
-#   Balasko, Jeno
-#   Beres, Szabolcs
-#   Kovacs, Ferenc
-#
-###############################################################################
-
-# We need a CVSROOT.
-. ~/.titan_builder
-rm -rf *.{py,pyc,sh} TTCNv3/etc/autotest
-cvs -t co TTCNv3/etc/autotest 2>&1 | tee cvs_autotest_output.txt && ln -sf TTCNv3/etc/autotest/*.{py,sh} .
-if [ "$1" = "-only_vobtests" ] ; then ./titan_builder.sh -c vobtests_on_x86_64_linux_tcclab1,vobtests_on_sparc_solaris_esekits3013 1>/dev/null
-elif [ "$1" = "-daily" ] ; then ./titan_builder.sh -c x86_64_linux_tcclab3_your_last_chance 1>/dev/null
-else ./titan_builder.sh -c x86_64_linux_tcclab5,x86_64_linux_tcclab4,x86_64_linux_tcclab5_clang,x86_64_linux_tcclab1,x86_64_linux_tcclab2,x86_64_linux_tcclab3,sparc_solaris_esekits3013,i386_solaris_bangjohansen,x86_64_linux_esekilxxen1843,vobtests_on_x86_64_linux_tcclab1 1>/dev/null
-fi
diff --git a/etc/autotest/mountall.sh b/etc/autotest/mountall.sh
deleted file mode 100755
index e856665b7..000000000
--- a/etc/autotest/mountall.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-###############################################################################
-# Copyright (c) 2000-2017 Ericsson Telecom AB
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Contributors:
-#   
-#   Balasko, Jeno
-#
-###############################################################################
-#!/bin/bash
-
-# The view is currently for `eferkov', but it's not important if CS 129 is
-# loaded.  The same for the directory used for publishing.  The license file
-# and the archive directory are for `eferkov' as well.  Update the files here
-# frequently as they change in CVS.
-sshfs -o ro,reconnect,transform_symlinks titanrt@147.214.15.153:/view/eferkov_tcc/vobs/ttcn /home/titanrt/titan_nightly_builds/vobs/ttcn
-sshfs -o reconnect,transform_symlinks titanrt@147.214.15.96:/proj/TTCN/www/ttcn/root/titan-testresults /home/titanrt/titan_nightly_builds/web
diff --git a/etc/autotest/product_handler.py b/etc/autotest/product_handler.py
deleted file mode 100755
index d4ddc330a..000000000
--- a/etc/autotest/product_handler.py
+++ /dev/null
@@ -1,287 +0,0 @@
-##############################################################################
-# Copyright (c) 2000-2017 Ericsson Telecom AB
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Contributors:
-#   Balasko, Jeno
-#   Kovacs, Ferenc
-#
-##############################################################################
-import os, re
-import titan_publisher, utils
-
-MAKEFILE_PATCH = 'makefile_patch.sh'
-
-class product_handler:
-  """ It is assumed that the VOB is reachable (e.g. through an SSHFS mounted
-      directory) on the master machine.  The source package is copied to all
-      slaves with SCP.  It is assumed that the logger is always set.
-
-      Saving stdout and stderr of product actions is optional.  During
-      publishing these files should be linked or copied to somewhere under
-      `public_html'.
-  """
-  def __init__(self, logger = None, config = None):
-    self._logger = logger
-    self._config = config
-
-  def config_products(self, target_path):
-    """ Localize first in the following order: `test', `demo'.  If neither of
-        these directories are found the `src' will be copied, it's a must
-        have.  If there's a Makefile or .prj in `test', `demo', the list of
-        files will be gathered from there.  Otherwise the contents of these
-        directories excluding the Makefile will be copied to the target
-        directory.  A new Makefile will be generated for the copied files.
-        Configspec 129 is used.  It's assumed that the ClearCase access is
-        working and all files are accessible through SSHFS etc.  The `test'
-        directory always has higher priority than `demo'.  However, `demo' is
-        the one released to the customer and not `test', but we're using the
-        TCC_Common VOB anyway.
-
-        We generate single mode Makefiles here for simplicity.  We can grep
-        through the sources for a `create' call, but it's so ugly.  It is
-        assumed that the files enumerated in Makefiles or .prj files are all
-        relative to the current directory.  The source and target paths are
-        class parameters.  It is assumed that the Makefile and the project
-        file are all in the `test' directory.  The Makefile will be ignored if
-        there's a project file in the same directory.
-        
-        The distribution of the VOB package is the job of the master.
-        Configuring the products with `ttcn3_makefilegen' is the job of the
-        slaves.
-
-        Returns:
-          0 if everything went fine and the VOB package is ready for
-          distribution.  1 otherwise.
-    """
-    utils.run_cmd('rm -rf ' + target_path)
-    for kind, products in self._config.products.iteritems():
-      for product in products:
-        product_name = product['name'].strip()
-        local_src_path = os.path.join(os.path.join(self._config.common['vob'], kind), product_name)
-        src_dir = os.path.join(local_src_path, 'src')
-        test_dir = os.path.join(local_src_path, 'test')
-        demo_dir = os.path.join(local_src_path, 'demo')
-        if not os.path.isdir(src_dir):
-          self._logger.error('Missing `src\' directory for product `%s\' ' \
-                             'in %s, skipping product' % (product_name, local_src_path))
-          continue
-        else:
-          dirs_to_copy = []
-          files_to_copy = []
-          if os.path.isdir(test_dir):
-            dirs_to_copy.append(test_dir)
-          elif os.path.isdir(demo_dir):
-            dirs_to_copy.append(demo_dir)
-          else:
-            # No `demo' or `test'.  The `src' is copied only if the other
-            # directories are missing.  There can be junk files as well.  The
-            # Makefile patch script must have a fixed name
-            # `makefile_patch.sh'.
-            dirs_to_copy.append(src_dir)
-            self._logger.debug('Product `%s\' in %s doesn\'t have the `demo\' or `test\' directories'
-                               % (product_name, local_src_path))
-          product_target_path = os.path.join(os.path.join(target_path, kind), product_name)
-          utils.run_cmd('mkdir -p ' + product_target_path)
-          has_prj_file = False
-          for dir in dirs_to_copy:
-            for dir_path, dir_names, file_names in os.walk(dir):
-              if not has_prj_file and \
-                len([file_name for file_name in file_names \
-                     if file_name.endswith('.prj')]) > 0: has_prj_file = True
-              for file_name in file_names:
-                if not has_prj_file:  # Trust the project file if we have one.
-                  files_to_copy.append(os.path.join(dir_path, file_name))
-                if (file_name == 'Makefile' and not has_prj_file) or file_name.endswith('.prj'):
-                  (makefile_patch, extracted_files) = \
-                    self.extract_files(dir_path, os.path.join(dir_path, file_name))
-                  files_to_copy.extend(extracted_files)
-                  if makefile_patch:
-                    utils.run_cmd('cp -Lf %s %s' \
-                      % (makefile_patch, os.path.join(product_target_path, MAKEFILE_PATCH))) 
-          utils.run_cmd('cp -Lf %s %s' % (' '.join(files_to_copy), product_target_path))
-          utils.run_cmd('chmod 644 * ; chmod 755 *.sh', product_target_path)
-          self._logger.debug('Product `%s\' was configured successfully ' \
-                             'with %d files' % (product_name, len(files_to_copy)))
-    return 0 
-
-  def build_products(self, proddir, logdir, config, rt2 = False):
-    """ Build the products provided in the list.  Simple `compiler -s' etc.
-        commands are executed from the directories of the products.  The
-        actions must be synchronized with the product configuration files.
-        The stderr and stdout of actions is captured here, but it's optional.
-
-        Arguments:
-          The directory of the products, the actual build configuration and
-          runtime.
-
-        Returns:
-          The build results in the following format:
-
-          results['kind1'] = [
-            {'name1':{'action1':(1, o1, e1), 'action2':-1}},
-            {'name2':{'action1':(1, o1, e1), 'action2':-1}},
-            {'name3':-1}
-          ]
-
-          The standard output and error channels are returned for each action
-          with the return value.  The return value is usually the exit status
-          of `make' or the `compiler'.  If the element is a simple integer
-          value the action was disabled for the current product.  The output
-          of this function is intended to be used by the presentation layer.
-    """
-    results = {}
-    if not os.path.isdir(logdir):
-      utils.run_cmd('mkdir -p %s' % logdir)
-    for kind, products in self._config.products.iteritems():
-      results[kind] = []
-      for product in products:
-        product_name = product['name'].strip()
-        product_dir = os.path.join(proddir, os.path.join(kind, product_name))
-        if not os.path.isdir(product_dir):
-          # No `src' was found for the product.  Maybe a list would be better
-          # instead.
-          results[kind].append({product_name:-1})
-          continue
-        info = {product_name:{}}
-        for product_key in product.iterkeys():
-          files = ' '.join(filter(lambda file: file.endswith('.ttcn') \
-                                  or file.endswith('.asn'), \
-                                  os.listdir(product_dir)))
-          cmd = None
-          if product_key == 'semantic':
-            if product[product_key]:
-              cmd = '%s/bin/compiler -s %s %s' % (config['installdir'], rt2 and '-R' or '', files)
-            else:
-              info[product_name][product_key] = -1
-              continue
-          elif product_key == 'translate':
-            if product[product_key]:
-              cmd = '%s/bin/compiler %s %s' % (config['installdir'], rt2 and '-R' or '', files)
-            else:
-              info[product_name][product_key] = -1
-              continue
-          elif product_key == 'compile' or product_key == 'run':
-            if product[product_key]:
-              utils.run_cmd('cd %s && %s/bin/ttcn3_makefilegen ' \
-                            '-fp %s *' % (product_dir, config['installdir'], rt2 and '-R' or ''))
-              if os.path.isfile(os.path.join(product_dir, MAKEFILE_PATCH)):
-                self._logger.debug('Patching Makefile of product `%s\' for the %s runtime'
-                                   % (product_name, rt2 and 'function-test' or 'load-test'))
-                utils.run_cmd('cd %s && mv Makefile Makefile.bak' % product_dir)
-                utils.run_cmd('cd %s && ./%s Makefile.bak Makefile' % (product_dir, MAKEFILE_PATCH))
-              cmd = 'make clean ; make dep ; make -j4 ; %s' % (product_key == 'run' and 'make run' or '')
-            else:
-              info[product_name][product_key] = -1
-              continue
-          else:
-            # Skip `name' or other things.
-            continue
-          (retval, stdout, stderr) = utils.run_cmd(cmd, product_dir, 900)
-          prod_stdout = os.path.join(logdir, '%s_%s_%s_stdout.%s' \
-            % (kind, product_name, product_key, rt2 and 'rt2' or 'rt1'))
-          prod_stderr = os.path.join(logdir, '%s_%s_%s_stderr.%s' \
-            % (kind, product_name, product_key, rt2 and 'rt2' or 'rt1'))
-          output_files = (prod_stdout, prod_stderr)
-          try:
-            out_file = open(prod_stdout, 'wt')
-            err_file = open(prod_stderr, 'wt')
-            if 'vobtest_logs' not in config or config['vobtest_logs']:
-              out_file.write(' '.join(stdout))
-              err_file.write(' '.join(stderr))
-            out_file.close()
-            err_file.close()
-          except IOError, (errno, strerror):
-            self._logger.error('Error while dumping product results: %s (%s)' \
-                               % (strerror, errno))
-          info[product_name][product_key] = (retval, output_files, stdout, stderr)
-        results[kind].append(info)
-    return results
-
-  def extract_files(self, path, filename):
-    """ Extract the files need to be copied all around from a given Makefile
-        or .prj file.  It handles wrapped lines (i.e. '\') in Makefiles.  """
-
-    # Interesting patterns in Makefiles and .prj files.  Tuples are faster
-    # than lists, use them for storing constants.
-    prj_matches = ( \
-      '<Module>\s*(.+)\s*</Module>', \
-      '<TestPort>\s*(.+)\s*</TestPort>', \
-      '<Config>\s*(.+)\s*</Config>', \
-      '<Other>\s*(.+)\s*</Other>', \
-      '<Other_Source>\s*(.+)\s*</Other_Source>', \
-      '<File path="\s*(.+)\s*"', \
-      '<File_Group path="\s*(.+)\s*"' )
-    
-    makefile_matches = ( \
-      '^TTCN3_MODULES =\s*(.+)', \
-      '^ASN1_MODULES =\s*(.+)', \
-      '^USER_SOURCES =\s*(.+)', \
-      '^USER_HEADERS =\s*(.+)', \
-      '^OTHER_FILES =\s*(.+)' )
-
-    try:
-      file = open(filename, 'rt')
-    except IOError:
-      self._logger.error('File `%s\' cannot be opened for reading' % filename)
-      return (None, [])
-    
-    files = []
-    makefile_patch = None
-    if re.search('.*[Mm]akefile$', filename):
-      multi_line = False
-      for line in file:
-        line = line.strip()
-        if multi_line:
-          files.extend(map(lambda f: os.path.join(path, f), line.split()))
-          multi_line = line.endswith('\\')
-          if multi_line:
-            files.pop()
-        else:
-          for line_match in makefile_matches:
-            matched = re.search(line_match, line)
-            if matched:
-              files.extend(map(lambda f: os.path.join(path, f),
-                               matched.group(1).split()))
-              multi_line = line.endswith('\\')
-              if multi_line:
-                files.pop()
-    elif re.search('.*\.prj$', filename) or re.search('.*\.grp', filename):
-      files_to_exclude = []
-      for line in file:
-        # Only basic support for Makefile patching, since it doesn't have a
-        # bright future in its current form...
-        matched = re.search('<ScriptFile_AfterMake>\s*(.+)\s*</ScriptFile_AfterMake>', line)
-        if matched:
-          makefile_patch = os.path.join(path, matched.group(1))
-          continue
-        matched = re.search('<UnUsed_List>\s*(.+)\s*</UnUsed_List>', line)
-        if matched:
-          files_to_exclude.extend(matched.group(1).split(','))
-          continue
-        for line_match in prj_matches:
-          matched = re.search(line_match, line)
-          if matched and matched.group(1) not in files_to_exclude:
-            file_to_append = os.path.join(path, matched.group(1))
-            files_to_append = []
-            if file_to_append != filename and file_to_append.endswith('.grp'):
-              self._logger.debug('Group file `%s\' found' % file_to_append)
-              last_slash = file_to_append.rfind('/')      
-              if last_slash != -1:
-                grp_dir = file_to_append[:last_slash]
-                if path.startswith('/'):
-                  grp_dir = os.path.join(path, grp_dir)  
-                (not_used, files_to_append) = self.extract_files(grp_dir, file_to_append)
-              else:
-                self._logger.warning('Skipping contents of `%s\', check ' \
-                                     'this file by hand' % file_to_append)
-            files.append(file_to_append)
-            files.extend(files_to_append)
-            break
-    else:
-      self._logger.error('Unsupported project description file: %s\n' % filename)
-    file.close()
-    return (makefile_patch, files)
diff --git a/etc/autotest/titan_builder.py b/etc/autotest/titan_builder.py
deleted file mode 100755
index f1624558a..000000000
--- a/etc/autotest/titan_builder.py
+++ /dev/null
@@ -1,1131 +0,0 @@
-##############################################################################
-# Copyright (c) 2000-2017 Ericsson Telecom AB
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Contributors:
-#   
-#   Balasko, Jeno
-#   Beres, Szabolcs
-#   Kovacs, Ferenc
-#   Raduly, Csaba
-#
-##############################################################################
-#!/usr/bin/env python
-
-import logging, optparse, os, re, sys, time, fnmatch
-import socket, traceback, types
-# Never import everything!  E.g. enumerate() can be redefined somewhere.
-# Check out: http://www.phidgets.com/phorum/viewtopic.php?f=26&t=3315.
-import product_handler, titan_builder_cfg, titan_publisher, utils, threading
-
-LOG_FILENAME = './titan_builder.log'
-TRACE_FILENAME = './titan_builder.err-log'
-
-vobtest_lock = threading.Lock()
-
-class config_handler:
-  """ Class to process the semi-configuration file and provide easy access to
-      the configuration data read.  This very same configuration file will be
-      reused by each slave.
-
-      Or simply include the parts of the current shell script setting all
-      environment variables?
-  """
-  def __init__(self, logger):
-    self.logger = logger
-    self.products = titan_builder_cfg.products
-    self.recipients = titan_builder_cfg.recipients
-    self.configs = titan_builder_cfg.configs
-    self.slaves = titan_builder_cfg.slaves
-    self.common = titan_builder_cfg.common
-
-    self.validate_config_data()
-
-  def __str__(self):
-    """ For debugging purposes only.  """
-    results = (str(self.configs), str(self.products), str(self.recipients), \
-               str(self.slaves))
-    return '\n'.join(results)
-
-  def is_used_config(self, config):
-    """ Check if the given build configuration is used by any of the slaves.
-        It is assumed that the configuration file is already validated.
-
-        Arguments:
-          config: The name of the build configuration.
-    """
-    for slave in self.slaves:
-      if config in self.slaves[slave]['configs']:
-        return True
-    # The build configuration will be skipped from the build process.
-    return False
-
-  def validate_config_data(self):
-    """ We have only one configuration file.  The wrong addresses are filtered
-        out automatically.  Add more checks.  Rewrite `installdir' in case of
-        a FOA build.
-    """
-    self.recipients = dict([(key, self.recipients[key]) \
-      for key in self.recipients \
-        if re.match('^<[\w\-\.]+@(\w[\w\-]+\.)+\w+>$', self.recipients[key])])
-#       elif current_section == 'slaves':
-#         row_data = [data.strip() for data in line.split()]
-#         if len(row_data) != 4:
-#         elif not re.match('^\w+\s*(\d{1,3}.){3}\d{1,3}\s*\w+\s*[\w/]+$', line):
-#         else:  # 100% correct data all over.
-#           self.slaves[row_data[0]] = row_data[1:]
-    for config_name, config_data in self.configs.iteritems():
-      if 'foa' in config_data and config_data['foa'] and (not 'foadir' in config_data or len(config_data['foadir']) == 0):
-        config_data['foadir'] = config_data['installdir']  # The final build directory, it'll be linked.
-        config_data['installdir'] = "%s/temporary_foa_builds/TTCNv3-%s" % ('/'.join(config_data['foadir'].split('/')[0:-1]), utils.get_time(True))
-
-class MasterThread(threading.Thread):
-  def __init__(self, titan_builder, config, config_name, slave_list, log_dir, build_dir, tests):
-    threading.Thread.__init__(self)
-    self.titan_builder = titan_builder
-    self.config = config
-    self.config_name = config_name
-    self.slave_list = slave_list
-    self.log_dir = log_dir
-    self.build_dir = build_dir
-    self.tests = tests
-    
-  def run(self):
-    self.slave_list.extend(self.titan_builder.master(self.config, self.config_name, self.log_dir, self.build_dir, self.tests))
-
-class RegtestThread(threading.Thread):
-  def __init__(self, titan_builder, config, slave_name):
-    threading.Thread.__init__(self)
-    self.titan_builder = titan_builder
-    self.config = config
-    self.slave_name = slave_name
-    
-  def run(self):
-    self.titan_builder.pass_regtest(self.config, self.slave_name)
-
-class FunctestThread(threading.Thread):
-  def __init__(self, titan_builder, config, slave_name):
-    threading.Thread.__init__(self)
-    self.titan_builder = titan_builder
-    self.config = config
-    self.slave_name = slave_name
-    
-  def run(self):
-    self.titan_builder.pass_functest(self.config, self.slave_name)
-
-class PerftestThread(threading.Thread):
-  def __init__(self, titan_builder, config, slave_name):
-    threading.Thread.__init__(self)
-    self.titan_builder = titan_builder
-    self.config = config
-    self.slave_name = slave_name
-    
-  def run(self):
-    self.titan_builder.pass_perftest(self.config, self.slave_name)
-
-class EclipseThread(threading.Thread):
-  def __init__(self, titan_builder, config, slave_name):
-    threading.Thread.__init__(self)
-    self.titan_builder = titan_builder
-    self.config = config
-    self.slave_name = slave_name
-    
-  def run(self):
-    self.titan_builder.pass_eclipse(self.config, self.slave_name)
-
-class VobtestThread(threading.Thread):
-  def __init__(self, titan_builder, config, slave_name):
-    threading.Thread.__init__(self)
-    self.titan_builder = titan_builder
-    self.config = config
-    self.slave_name = slave_name
-    
-  def run(self):
-    self.titan_builder.pass_vobtest(self.config, self.slave_name)
-
-class titan_builder:
-  def __init__(self):
-    self.logger = None
-    self.logger = self.create_logger()
-    self.config = None
-    self.config = self.create_config()
-    self.publisher = None
-    self.publisher = self.create_publisher()
-
-  def create_logger(self):
-    if self.logger:
-      return self.logger
-    logger = logging.getLogger('titan_logger')
-    logger.setLevel(logging.DEBUG)
-    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
-    handler = logging.FileHandler(LOG_FILENAME)
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    sth = logging.StreamHandler()
-    sth.setLevel(logging.DEBUG)
-    logger.addHandler(sth)
-    return logger  # Just like in singleton.
-
-  def create_config(self):
-    """ Create the configuration file handler class.  If it's already created
-        the existing one will be returned.  The configuration cannot be
-        changed during operation.  It cannot be reloaded etc.
-    """
-    if self.config:
-      return self.config
-    return config_handler(self.logger)
-
-  def create_publisher(self):
-    if self.publisher:
-      return self.publisher
-    return titan_publisher.titan_publisher(self.logger, self.config)
-
-  def remove_dups(self, list = []):
-    """ Remove duplicates from a list.
-    """
-    tmp_list = []
-    if len(list) > 0:
-      [tmp_list.append(elem) for elem in list if not elem in tmp_list]
-    return tmp_list
-
-  def build(self, config, slave_name, reset, set_addressees, tests, path):
-    """ Build the specified build configurations.  The configurations are
-        built sequentially.  For the slaves a single build configuration should
-        be specified in the command line.  The slave will abort build execution
-        if there are more build configurations specified.  It's a limitation
-        and should be relaxed later.
-
-        Arguments:
-          config: The list of build configurations specified in the command line.
-          slave_name: The name of the slave if `--slave-mode' is on.
-          reset: Reset statistics.
-          set_addressees: List of recipients.
-          tests: Tests to run for all configurations. 
-
-        Returns:
-          Nothing.  It's the main driver of the whole build.
-    """
-    config_list = []
-    if not config:
-      self.logger.warning('Running all available build configurations from ' \
-                          'the configuration file...')
-      config_list.extend(self.config.configs.keys())
-    elif not re.match('^\w+(,\w+)*$', config):
-      self.logger.error('Invalid build configuration list: `%s\'' % config)
-    else:
-      config = self.remove_dups(config.split(','))
-      for config_elem in config:
-        if not config_elem in self.config.configs.keys():
-          self.logger.error('Build configuration `%s\' not found' \
-                            % config_elem)
-        else:
-          config_list.append(config_elem)
-    if not len(config_list) > 0:
-      self.logger.error('No valid build configurations were found, ' \
-                        'exiting...')
-      return
-    if set_addressees:
-      self.config.recipients = {}
-      addressees = set_addressees.split(',')
-      for addressee in addressees:
-        self.config.recipients[' '.join(addressee.split(' ')[:-1])] = addressee.split(' ')[-1];
-    if not slave_name:
-      everything_started_here = utils.get_time()
-      utils.run_cmd('/bin/rm -rf %s %s && mkdir -p %s %s' \
-                    % (self.config.common['builddir'], self.config.common['logdir'], self.config.common['builddir'], self.config.common['logdir']), None, 1800, self.logger)
-      slave_list = []
-      master_threads = []
-      for config_name in config_list:
-        if not self.config.is_used_config(config_name):
-          self.logger.warning('Skipping unused build configuration: `%s\'' \
-                              % config_name)
-        else:
-          # Create the slave and configuration specific log directory.  If the
-          # logs haven't arrived yet from the given slave, that slave should
-          # be considered lost.
-          build_dir = os.path.join(self.config.common['builddir'], config_name)
-          log_dir = os.path.join(self.config.common['logdir'], config_name)
-          utils.run_cmd('/bin/rm -rf %s %s && mkdir -p %s %s' % (build_dir, log_dir, build_dir, log_dir), None, 1800, self.logger)
-          master_thread = MasterThread(self, self.config.configs[config_name], config_name, slave_list, log_dir, build_dir, tests)
-          master_thread.start()
-          master_threads.append((config_name, master_thread))
-      for config_name, master_thread in master_threads:
-        master_thread.join()
-        self.logger.debug('Master thread for `%s\' joined successfully' % config_name)
-      everything_ended_here = utils.get_time()
-      self.gather_all_stuff_together_and_present_to_the_public( \
-        everything_started_here, everything_ended_here, slave_list, reset)
-    else:
-      # Run the tests on the given slave of each assigned build configuration.
-      # It may cause problems if several configurations are run one after
-      # another, but otherwise it's not possible to assign multiple build
-      # configurations at all.
-      for config_name in config_list:
-        self.logger.debug('Hello, from a slave `%s\' running build ' \
-                          'configuration `%s\'' \
-                          % (slave_name, config_name))
-        if tests and len(tests) > 0:
-          self.config.configs[config_name]['functest'] = tests.find('f') != -1
-          self.config.configs[config_name]['perftest'] = tests.find('p') != -1
-          self.config.configs[config_name]['regtest'] = tests.find('r') != -1
-        self.slave(self.config.configs[config_name], config_name, slave_name)
-
-  def get_titan(self, config, config_name, log_dir, build_dir):
-    """ Get the TITAN sources from the CVS repository.  It can do checkouts by
-        tag and date only.  If the version string is omitted HEAD will be
-        used.  The checkout will be made into the build directory.  The output
-        is not handled by the output handler yet.
-
-        Arguments:
-          The build configuration to get TITAN sources for.  Log/build
-          directories.
-
-        Returns:
-          0 on success.  1 if the checkout failed for some reason.  It's most
-          probably a timeout, since the parameters are validated and the
-          existence of `cvs' is required.  So, it's safe to abort the build if
-          1 is returned.
-    """
-    command_line = 'cd %s && cvs get TTCNv3' % build_dir
-    if re.match('^v\d\-\d\-pl\d$', config['version']):
-      command_line = 'cd %s && cvs co -r%s TTCNv3' \
-                     % (build_dir, config['version'])
-    elif re.match('2\d{7}', config['version']):
-      command_line = 'cd %s && cvs co -D%s TTCNv3' \
-                     % (build_dir, config['version'])
-    command_line += ' 1>%s/cvs-%s.stdout 2>%s/cvs-%s.stderr' \
-                    % (log_dir, config_name, log_dir, config_name)
-    self.logger.debug('CVS checkout starting for config `%s\'', config_name)
-    (retval, stdout, stderr) = utils.run_cmd(command_line, None, 10800)
-    if retval:
-      self.logger.error('The CVS checkout failed with command: `%s\', exit status: `%d\', stdout: `%s\', stderr: `%s\'' \
-                        % (command_line, retval, stdout, stderr))
-      return 1  # `retval' is not handled yet.
-    self.logger.debug('CVS checkout finished for config `%s\'', config_name)
-    return 0
-
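-  # A minimal sketch of how `get_titan()' above chooses the CVS command; the
-  # version strings are purely illustrative examples, not taken from a real
-  # build configuration:
-  #
-  #   'v1-8-pl5'  matches '^v\d\-\d\-pl\d$'  ->  cvs co -rv1-8-pl5 TTCNv3
-  #   '20091231'  matches '2\d{7}'           ->  cvs co -D20091231 TTCNv3
-  #   anything else (or empty)               ->  cvs get TTCNv3  (HEAD)
-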
-  def master(self, config, config_name, log_dir, build_dir, tests):
-    """ Prepare the packages for the slaves.  The regression tests and
-        function tests are part of TITAN, hence the preparations regarding
-        those tests are done together with TITAN.  It seems to make sense.
-        Delete only the `TTCNv3' directory when switching between build
-        configurations.  It's advised to use a different global build
-        directory for the master and slaves.
-
-        Arguments:
-          The current build configuration and its name.
-    """
-    slave_list = []
-    for slave_name in self.config.slaves:
-      slave = self.config.slaves[slave_name]
-      if not config_name in slave['configs']:
-        continue
-      slave_url = '%s@%s' % (slave['user'], slave['ip'])
-      # Need more robust IP address checking.  It doesn't work on my Debian
-      # laptop.  It can return simply `127.0.0.1' and fool this check
-      # completely.
-      is_localhost = socket.gethostbyname(socket.gethostname()) == slave['ip']
-      if self.pass_prepare_titan(config, config_name, slave_name, log_dir, build_dir):
-        continue  # Configuration for the given slave has failed.
-      # The slave list is needed for the last pass.
-      slave_list.append((slave_name, config_name, is_localhost))
-      
-      self.logger.debug('Removing old build `%s\' and log `%s\' ' \
-                        'directories for slave `%s\' and build configuration `%s\'' \
-                        % (config['builddir'], config['logdir'], slave_name, config_name))
-      if is_localhost:  # Cleanup first.
-        utils.run_cmd('/bin/rm -rf %s %s && mkdir -p %s %s' \
-                      % (config['builddir'], config['logdir'],
-                         config['builddir'], config['logdir']), None, 1800, self.logger)
-      else:
-        utils.run_cmd('ssh %s \'/bin/rm -rf %s %s && mkdir -p %s %s\'' \
-                      % (slave_url, config['builddir'], config['logdir'],
-                         config['builddir'], config['logdir']), None, 1800, self.logger)
-
-      if config['perftest']:
-        self.logger.debug('Copying performance tests for slave `%s\'' % slave_name)
-        self.pass_prepare_perftest(config, config_name, slave, slave_name, slave_url, \
-                                   is_localhost)
-      if config['vobtest']:
-        self.logger.debug('Copying VOB product tests for slave `%s\'' % slave_name)
-        self.pass_prepare_vobtest(config, config_name, slave, slave_name, slave_url, \
-                                  is_localhost)
-
-      if is_localhost:  # Optimize local builds.
-        self.logger.debug('It\'s a local build for slave `%s\' and build ' \
-                          'configuration `%s\', working locally' \
-                          % (slave_name, config_name))
-        utils.run_cmd('cp %s/TTCNv3-%s.tar.bz2 %s' % \
-                      (build_dir, config_name, config['builddir']), None, 1800)     
-        utils.run_cmd('cp ./*.py ./*.sh %s' % config['builddir'])
-        utils.run_cmd('cd %s && %s/titan_builder.sh -s %s -c %s %s' \
-                      % (config['builddir'], config['builddir'], \
-                         slave_name, config_name, ((tests and len(tests) > 0) and ('-t %s' % tests) or '')), None, 21600)
-        utils.run_cmd('cp -r %s/%s/* %s' \
-                      % (config['logdir'], slave_name, log_dir))
-      else:
-        self.logger.debug('It\'s a remote build for slave `%s\' and ' \
-                          'build configuration `%s\', doing remote build' \
-                          % (slave_name, config_name))
-        (retval, stdout, stderr) = \
-          utils.run_cmd('scp %s/TTCNv3-%s.tar.bz2 %s:%s' \
-                        % (build_dir, config_name, slave_url,
-                           config['builddir']), None, 1800)
-        if not retval:
-          self.logger.debug('The TITAN package is ready and distributed ' \
-                            'for slave `%s\'' % slave_name)
-        else:
-          self.logger.error('Unable to distribute the TITAN package for ' \
-                            'slave `%s\', it will be skipped from build ' \
-                            'configuration `%s\'' % (slave_name, config_name))
-          continue
-        utils.run_cmd('scp ./*.py %s:%s' % (slave_url, config['builddir']))
-        utils.run_cmd('scp ./*.sh %s:%s' % (slave_url, config['builddir']))
-        utils.run_cmd('ssh %s \'cd %s && %s/titan_builder.sh -s %s -c ' \
-                      '%s %s\'' % (slave_url, config['builddir'], \
-                                config['builddir'], slave_name, config_name, ((tests and len(tests) > 0) and ('-t %s' % tests) or '')   ), None, 21600)
-        utils.run_cmd('scp -r %s:%s/%s/* %s' \
-                      % (slave_url, config['logdir'], slave_name, log_dir))
-        
-    return slave_list
-
-  def gather_all_stuff_together_and_present_to_the_public(self, build_start, \
-    build_end, slave_list, reset):
-    """ Collect and process all logs.  Only the CVS logs are coming from the
-        master.  If the CSV output is not arrived from a slave, then the slave
-        will be considered lost.
-    """
-    build_root = utils.get_time(True)
-    html_root = os.path.join(self.config.common['htmldir'], build_root)
-    utils.run_cmd('mkdir -p %s' % html_root, None, 1800, self.logger)
-    utils.run_cmd('cd %s && /bin/rm -f latest && ln -s %s latest' % (self.config.common['htmldir'], build_root))
-    utils.run_cmd('cp -r %s/* %s' % (self.config.common['logdir'], html_root))
-    email_file = '%s/report.txt' % html_root
-    self.publisher.publish_csv2email(build_start, build_end, email_file, \
-                                     slave_list, build_root, self.config.configs, reset)
-    self.publisher.publish_html(build_root)
-    utils.send_email(self.logger, self.config.recipients, email_file)
-
-  def pass_prepare_titan(self, config, config_name, slave_name, log_dir, build_dir):
-    """ Get TITAN from the CVS and configure it for the actual slave.  Then
-        TITAN archive is created.  The archive is not copied to the actual
-        slave, because this function can be a showstopper for the whole build
-        process for the actual slave.
-
-        Arguments:
-          The build configuration and its name, the actual slave's name,
-          log/build directories.
-
-        Returns:
-          0 if everything went fine.  1 is returned when e.g. the CVS was
-          unreachable or the TITAN configuration failed for some reason.
-          Returning 1 should stop the build process for the actual slave.
-    """
-    if self.get_titan(config, config_name, log_dir, build_dir):
-      self.logger.error('The CVS checkout failed for slave `%s\' and ' \
-                        'build configuration `%s\'' \
-                        % (slave_name, config_name))
-      return 1
-    if self.config_titan(config, build_dir):
-      self.logger.error('Configuring TITAN failed for slave `%s\' ' \
-                        'and build configuration `%s\'' \
-                        % (slave_name, config_name))
-      return 1
-    utils.run_cmd('cd %s && tar cf TTCNv3-%s.tar ./TTCNv3' \
-                  % (build_dir, config_name), None, 1800)
-    utils.run_cmd('cd %s && bzip2 TTCNv3-%s.tar' \
-                  % (build_dir, config_name), None, 1800)
-    utils.run_cmd('/bin/rm -rf %s/TTCNv3' % build_dir, None, 1800)
-    return 0
-
-  def pass_prepare_perftest(self, config, config_name, slave, slave_name, slave_url, \
-                            is_localhost):
-    """ Copy the performance test package to the actual slave.  It's a simple
-        archive.  Its location is defined in the configuration file.
-
-        Arguments:
-          The build configuration and its name with the actual slave and its
-          name.  Nothing is returned.
-    """
-    if os.path.isfile(config['perftestdir']):
-      if is_localhost:
-        utils.run_cmd('cp -f %s %s' % (config['perftestdir'], \
-                      config['builddir']))
-      else:
-        (retval, stdout, stderr) = utils.run_cmd('scp %s %s:%s' \
-          % (config['perftestdir'], slave_url, config['builddir']))
-        if retval:
-          self.logger.error('Unable to copy performance test package ' \
-                            'to slave `%s\'' % slave_name)
-    else:
-      self.logger.error('The performance test package cannot be found at ' \
-                        '`%s\'' % config['perftestdir'])
-
-  def pass_prepare_vobtest(self, config, config_name, slave, slave_name, slave_url, \
-                           is_localhost):
-    """ Collect and configure the VOB products.  The products will be
-        collected only if there's no file matching the `vobtest-*.tar.bz2'
-        pattern in the build directory.  The resulting archive is copied to
-        the given slave only if it's a remote slave.  The errors are reported
-        to the local error log.  The URL of the slave is calculated locally.
-
-        Arguments:
-          The build configuration and its name with the actual slave and its
-          name.  Nothing is returned.
-    """
-    vobtest_lock.acquire()
-    # `fnmatch' expects a shell glob pattern, not a regular expression.
-    really_collect_products = len([file for file in os.listdir(self.config.common['builddir']) \
-                                   if fnmatch.fnmatch(file, 'vobtest-*.tar.bz2')]) == 0
-    if really_collect_products:
-      handler = product_handler.product_handler(self.logger, self.config)
-      if handler.config_products('%s/vobtest' % self.config.common['builddir']):
-        self.logger.error('Configuring VOB products failed for slave: ' \
-                          '`%s\' and build configuration: `%s\'' \
-                          % (slave_name, config_name))
-        return
-      utils.run_cmd('cd %s && tar cf vobtest-%s.tar ./vobtest' \
-                    % (self.config.common['builddir'], \
-                       time.strftime('%Y%m%d')), None, 1800)
-      utils.run_cmd('cd %s && bzip2 vobtest-*.tar' \
-                    % self.config.common['builddir'], None, 1800)
-      utils.run_cmd('/bin/rm -rf %s/vobtest %s/vobtest-*.tar' \
-                    % (self.config.common['builddir'], \
-                       self.config.common['builddir']), None, 1800)
-    else:
-      self.logger.debug('VOB products don\'t need to be configured again')
-    vobtest_lock.release()
-    if not is_localhost:
-      (retval, stdout, stderr) = \
-        utils.run_cmd('scp %s/vobtest-*.tar.bz2 %s:%s' \
-                      % (self.config.common['builddir'], slave_url, \
-                         config['builddir']))
-      if retval:
-        self.logger.error('Copying the VOB package to slave: ' \
-                          '`%s\' failed for build configuration: `%s\'' \
-                          % (slave_name, config_name))
-    else:
-      utils.run_cmd('cp %s/vobtest-*.tar.bz2 %s' \
-                    % (self.config.common['builddir'], config['builddir']))
-
-  def slave(self, config, config_name, slave_name):
-    """ Run the build passes sequentially.  If the TITAN build fails, the
-        remaining passes are skipped.  Log everything.  All the results will
-        be written in all supported formats.  It should be configurable.
-    """
-    self.logger.debug('Setting environment variables from `pass_setenv()\'')
-    self.pass_setenv(config, slave_name)
-    self.logger.debug('Building TITAN from `pass_titan()\'')
-    stamp_old = utils.get_time()
-    if not self.pass_titan(config, config_name, slave_name):
-      test_threads = []
-      if config['regtest']:
-        regtest_thread = RegtestThread(self, config, slave_name)
-        regtest_thread.start()
-        test_threads.append(('regression tests', regtest_thread))
-        self.logger.debug('Running regression tests from `pass_regtest()\'')
-      if config['functest']:
-        functest_thread = FunctestThread(self, config, slave_name)
-        functest_thread.start()
-        test_threads.append(('function tests', functest_thread))
-        self.logger.debug('Running function tests from `pass_functest()\'')
-      if config['perftest']:
-        perftest_thread = PerftestThread(self, config, slave_name)
-        perftest_thread.start()
-        test_threads.append(('performance tests', perftest_thread))
-        self.logger.debug('Running performance tests from `pass_perftest()\'')
-      if 'eclipse' in config and config['eclipse']:
-        eclipse_thread = EclipseThread(self, config, slave_name)
-        eclipse_thread.start()
-        test_threads.append(('eclipse tests', eclipse_thread))
-        self.logger.debug('Running Eclipse build from `pass_eclipse()\'')
-      if config['vobtest']:
-        vobtest_thread = VobtestThread(self, config, slave_name)
-        vobtest_thread.start()
-        test_threads.append(('VOB product tests', vobtest_thread))
-        self.logger.debug('Running VOB product tests from `pass_vobtest()\'')
-      for test_thread_name, test_thread in test_threads:
-        test_thread.join()
-        self.logger.debug('Thread for `%s\' joined successfully' % test_thread_name)
-    self.publisher.dump_csv(stamp_old, utils.get_time(), config, config_name, slave_name)
-    self.publisher.dump_txt(stamp_old, utils.get_time(), config, config_name, slave_name)
-    self.publisher.dump_html(stamp_old, utils.get_time(), config, config_name, slave_name)
-    self.logger.debug('Finalizing build using `pass_slave_postprocess()\'')
-    self.pass_slave_postprocess(config, config_name, slave_name)
-
-  def pass_slave_postprocess(self, config, config_name, slave_name):
-    """ Archive stuff and make everything available for the master.  The
-        master will copy all necessary stuff.  The build directory is
-        available until the next build.  Do the cleanup here.  The installation
-        directory is never removed.
-
-        Arguments:
-          The current build configuration.
-    """
-    utils.run_cmd('cd %s && tar cf TTCNv3-%s-bin.tar ./TTCNv3' \
-                  % (config['builddir'], config_name), None, 1800)
-    utils.run_cmd('bzip2 %s/TTCNv3-%s-bin.tar' \
-                  % (config['builddir'], config_name), None, 1800)
-    utils.run_cmd('/bin/rm -rf %s/TTCNv3' % config['builddir'], None, 1800)
-    utils.run_cmd('/bin/rm -f %s/TTCNv3-%s.tar.bz2' \
-                  % (config['builddir'], config_name))
-    utils.run_cmd('cd %s && /bin/rm -f *.py *.pyc *.sh' % config['builddir'])
-    utils.run_cmd('mv -f %s %s %s/%s' % (LOG_FILENAME, TRACE_FILENAME, \
-                                         config['logdir'], slave_name))
-
-  def pass_titan(self, config, config_name, slave_name):
-    """ Build pass for TITAN itself.  It is assumed that the master have
-        already copied the TITAN package to the build directory.  It's the
-        only requirement here.  If the installation fails the TITAN build is
-        considered as a failure.  Only the `make install' is taken into
-        consideration.
-
-        Arguments:
-          The current build configuration of the slave and its name.
-
-        Returns:
-          1 on error, e.g. if the TITAN package is not present.  0 if the
-          TITAN package was found and the full build completed successfully.
-    """
-    stamp_begin = utils.get_time()
-    utils.run_cmd('mkdir -p %s/%s' % (config['logdir'], slave_name), None, 1800, self.logger)
-    utils.run_cmd('bunzip2 %s/TTCNv3-%s.tar.bz2' \
-                  % (config['builddir'], config_name), None, 1800, self.logger)
-    utils.run_cmd('cd %s && tar xf TTCNv3-%s.tar && bzip2 %s/TTCNv3-*.tar' \
-                  % (config['builddir'], config_name, config['builddir']), \
-                  None, 1800, self.logger)
-    if not os.path.isdir('%s/TTCNv3' % config['builddir']):
-      self.logger.error('The `%s/TTCNv3\' directory is not found' \
-                        % config['builddir'])
-      self.publisher.titan_out(config, slave_name, \
-                               (stamp_begin, utils.get_time(), None))
-      return 1
-    utils.run_cmd('cd %s && find . -exec touch {} \;' % config['builddir'], None, 1800)
-    (ret_val_dep, stdout_dep, stderr_dep) = \
-      utils.run_cmd('cd %s/TTCNv3 && make dep 2>&1' \
-                    % config['builddir'], None, 1800)
-    (ret_val_make, stdout_make, stderr_make) = \
-      utils.run_cmd('cd %s/TTCNv3 && make -j4 2>&1' \
-                    % config['builddir'], None, 1800)
-    (ret_val_install, stdout_install, stderr_install) = \
-      utils.run_cmd('cd %s/TTCNv3 && make install 2>&1' \
-                    % config['builddir'], None, 1800)
-    if ret_val_make or ret_val_install:
-      self.logger.error('TITAN build failed for slave `%s\', please check ' \
-                        'the logs for further investigation, stopping slave ' \
-                        % slave_name)
-    output = (stamp_begin, utils.get_time(), \
-              ((ret_val_dep, stdout_dep, stderr_dep), \
-               (ret_val_make, stdout_make, stderr_make), \
-               (ret_val_install, stdout_install, stderr_install)))
-    self.publisher.titan_out(config, slave_name, output)
-    if ret_val_dep or ret_val_make or ret_val_install:
-      return 1
-    else:
-      if 'foa' in config and config['foa'] and 'foadir' in config and config['foadir'] != config['installdir']:
-        # The `installdir' must be removed by hand after a FOA period.  Cannot
-        # be automated in a sane way.  For FOA `installdir' shall be a unique
-        # directory.  E.g. date based.  Otherwise, the builds are always
-        # overwritten.
-        self.logger.debug('Linking directories for FOA build to `%s\'' % config['foadir'])
-        (ret_val_rm, stdout_rm, stderr_rm) = utils.run_cmd('/bin/rm -rvf %s' % config['foadir'])
-        if ret_val_rm:  # Sometimes it doesn't work.
-          self.logger.error('Unable to remove `%s\': `%s\'' % (config['foadir'], ''.join(stderr_rm)))
-        utils.run_cmd('ln -s %s %s' % (config['installdir'], config['foadir']))
-      return 0
-
-  def pass_setenv(self, config, slave_name):
-    """ Set some environment variables needed to run the TITAN tests.  Don't
-        use uppercase letters in directory names.  GCC is added to the paths as well.
-        Always check if an environment variable exists before reading it.
-
-        Arguments:
-          The current build configuration of the slave and its name.
-    """
-    path = os.environ.get('PATH')
-    ld_library_path = os.environ.get('LD_LIBRARY_PATH')
-    os.environ['PATH'] = '%s/bin:%s/bin:%s' % (config['installdir'], config['gccdir'], path and path or '')
-    os.environ['LD_LIBRARY_PATH'] = '%s/lib:%s/lib:%s' % (config['installdir'], config['gccdir'], ld_library_path and ld_library_path or '')
-    os.environ['TTCN3_DIR'] = config['installdir']
-    os.environ['TTCN3_LICENSE_FILE'] = config['license']
-
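-  # A minimal sketch of the environment `pass_setenv()' produces, assuming the
-  # purely illustrative values installdir=/opt/titan and gccdir=/opt/gcc:
-  #
-  #   PATH               = /opt/titan/bin:/opt/gcc/bin:<previous PATH>
-  #   LD_LIBRARY_PATH    = /opt/titan/lib:/opt/gcc/lib:<previous value>
-  #   TTCN3_DIR          = /opt/titan
-  #   TTCN3_LICENSE_FILE = <config['license']>
-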
-  def pass_regtest_helper(self, config, slave_name, runtime):
-    """ Run the regression tests with `make' and then `make run'.  The output
-        is sent to the publisher as well.  At the end, `make clean' is done to
-        save some bytes.  Don't use `tee', since its exit code will always be
-        0.  Only `stdout' is used.
-
-        Arguments:
-          config: The current build configuration.
-          slave_name: The name of the slave.
-          runtime: 0 for the load-test run-time, 1 for the function-test
-                   runtime.
-    """
-    utils.run_cmd('cd %s/TTCNv3/regression_test && make distclean' \
-                  % config['builddir'], None, 1800)
-    (ret_val_make, stdout_make, stderr_make) = \
-      utils.run_cmd('cd %s/TTCNv3/regression_test && %s make 2>&1' \
-                    % (config['builddir'], runtime and 'RT2=1' or ''), None, 3600)
-    if ret_val_make:
-      self.logger.error('The regression test failed to build for the ' \
-                        '`%s\' runtime' % (runtime and 'function-test' or 'load-test'))
-    (ret_val_run, stdout_run, stderr_run) = \
-      utils.run_cmd('cd %s/TTCNv3/regression_test && %s make run 2>&1' \
-                    % (config['builddir'], runtime and 'RT2=1' or ''), None, 1800)
-    failed_lines = []
-    all_fine = True
-    for index, line in enumerate(stdout_run):
-      matched_line = re.search('Verdict stat.*pass \((\d+)\..*', line)
-      if matched_line and int(matched_line.group(1)) != 100:
-        if all_fine and not failed_lines:
-          failed_lines.append('\nThe failed tests were the following:\n\n')
-        if not re.search('TverdictOper', stdout_run[index - 1]):
-          all_fine = False
-        failed_lines.append(stdout_run[index - 1])
-        failed_lines.append(line)
-    stdout_run.extend(failed_lines)
-    if ret_val_run or not all_fine:
-      self.logger.error('The regression test failed to run for the ' \
-                        '`%s\' runtime' % (runtime and 'function-test' or 'load-test'))
-      ret_val_run = 1
-    utils.run_cmd('cd %s/TTCNv3/regression_test && make clean' \
-                  % config['builddir'], None, 1800)
-    return ((ret_val_make, stdout_make, stderr_make), \
-             (ret_val_run, stdout_run, stderr_run))
-
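-  # A minimal sketch of the verdict check in `pass_regtest_helper()', with a
-  # made-up log line:
-  #
-  #   line = 'Verdict statistics: 0 none, 98 pass (98.00 %), 2 fail (2.00 %)'
-  #   re.search('Verdict stat.*pass \((\d+)\..*', line).group(1)  ->  '98'
-  #
-  # Since 98 != 100, the preceding `make run' line is appended to the failure
-  # summary and, unless it belongs to the TverdictOper suite, the whole run is
-  # marked as failed.
-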
-  def pass_regtest(self, config, slave_name):
-    """ Build and run the regression tests and publish the results.  The
-        `pass_regtest_helper()' does the dirty job.
-
-        Arguments:
-          config: The current build configuration.
-          slave_name: The name of the slave.
-    """
-    output = {}
-    stamp_begin = utils.get_time()
-    rt1_results = self.pass_regtest_helper(config, slave_name, 0)
-    output['rt1'] = (stamp_begin, utils.get_time(), rt1_results)
-    if config['rt2']:
-      stamp_begin = utils.get_time()
-      rt2_results = self.pass_regtest_helper(config, slave_name, 1)
-      output['rt2'] = (stamp_begin, utils.get_time(), rt2_results)
-    self.publisher.regtest_out(config, slave_name, output)
-
-  def pass_eclipse(self, config, slave_name):
-    """ Build Eclipse plugins and publish them to an update site.
-
-        Arguments:
-          config: The current build configuration.
-          slave_name: The name of the slave.
-    """
-    output = {}
-    stamp_begin = utils.get_time()
-    results = utils.run_cmd('cd %s/TTCNv3/eclipse/automatic_build && ant -d -l mylog.log -f build_main.xml updatesite.experimental 2>&1' \
-                            % config['builddir'], None, 1800)
-    log_dir = os.path.join(config['logdir'], slave_name)
-    utils.run_cmd('cp %s/TTCNv3/eclipse/automatic_build/mylog.log %s/eclipse-mylog.log' \
-                  % (config['builddir'], log_dir))                            
-    output = (stamp_begin, utils.get_time(), os.path.join(log_dir, 'eclipse-mylog.log'), results)
-    self.publisher.eclipse_out(config, slave_name, output)
-
-  def pass_perftest_helper(self, config, slave_name, runtime):
-    """ Build the performance test and run it for some predefined CPS values.
-        These CPS values should come from the build configurations instead.
-        Obviously, if the build fails all test runs are skipped.  It handles
-        its own tarball as well.  It's unpacked at the beginning and removed
-        at the end.  The results are also published.
-
-        Arguments:
-          The actual build configuration and the name of the slave.  The
-          function returns nothing.
-    """
-    perftest_out = {}
-    utils.run_cmd('cd %s/perftest && ttcn3_makefilegen -e titansim %s ' \
-                  '*.ttcnpp *.ttcnin *.ttcn *.cc *.cfg' \
-                  % (config['builddir'], (runtime and '-fpgR' or '-fpg')))
-    # Avoid infinite recursion.
-    utils.run_cmd('sed \'s/^-include $(DEPFILES)$//\' Makefile >Makefile-tmp && mv Makefile-tmp Makefile',
-                  os.path.join(config['builddir'], 'perftest'))
-    utils.run_cmd('cd %s/perftest && make clean' % config['builddir'])
-    (ret_val_dep, stdout_dep, stderr_dep) = \
-      utils.run_cmd('cd %s/perftest && find . -exec touch {} \; && make %s dep 2>&1' \
-                    % (config['builddir'], (runtime and 'RT2=1' or '')), \
-                    None, 900)
-    (ret_val_make, stdout_make, stderr_make) = \
-      utils.run_cmd('cd %s/perftest && make %s 2>&1' \
-                    % (config['builddir'], (runtime and 'RT2=1' or '')), \
-                    None, 1800)
-    perftest_out['dep'] = (ret_val_dep, stdout_dep, stderr_dep)
-    perftest_out['make'] = (ret_val_make, stdout_make, stderr_make)
-    perftest_out['run'] = []
-    if not ret_val_make:
-      cps_min = config.get('cpsmin', 1000)
-      cps_max = config.get('cpsmax', 2000)
-      cps_diff = abs(cps_max - cps_min) / 5
-      for cps in range(cps_min, cps_max + cps_diff, cps_diff):
-        # These numbers should be platform dependent.  Lower on slow
-        # machines and high on fast machines.
-        (ret_val_run, stdout_run, stderr_run) = \
-          utils.run_cmd('cd %s/perftest && cpp -DTSP_CPS_CPP=%d.0 config.cfg >config.cfg-tmp && ' \
-                        'ttcn3_start ./titansim ./config.cfg-tmp 2>&1' \
-                        % (config['builddir'], cps), None, 900)
-        for line in stdout_run:
-          matched_line = re.search('Verdict stat.*pass \((\d+)\..*', line)
-          if matched_line and int(matched_line.group(1)) != 100:
-            self.logger.error('Performance test failed to run for `%d\' CPSs' % cps)
-            ret_val_run = 1
-        perftest_out['run'].append((cps, (ret_val_run, stdout_run, stderr_run)))
-    else:
-      self.logger.error('Performance test compilation failed for the ' \
-                        '`%s\' runtime' % (runtime and 'function-test' or 'load-test'))
-    return perftest_out
-
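-  # A minimal sketch of how the CPS steps in `pass_perftest_helper()' are
-  # generated, using the built-in defaults cpsmin=1000 and cpsmax=2000:
-  #
-  #   cps_diff = abs(2000 - 1000) / 5 = 200
-  #   range(1000, 2000 + 200, 200)  ->  1000, 1200, 1400, 1600, 1800, 2000
-  #
-  # Note that equal `cpsmin' and `cpsmax' values would make the step zero and
-  # break `range()'.
-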
-  def pass_perftest(self, config, slave_name):
-    """ Build and run the performance tests and publish the results.  The
-        `pass_perftest_helper()' does the dirty job.
-
-        Arguments:
-          config: The current build configuration.
-          slave_name: The name of the slave.
-    """
-    utils.run_cmd('bunzip2 perftest-*.tar.bz2', config['builddir'], 1800)
-    utils.run_cmd('tar xf ./perftest-*.tar && bzip2 ./perftest-*.tar', \
-                  config['builddir'], 1800)
-    if not os.path.isdir('%s/perftest' % config['builddir']):
-      self.logger.error('The performance test is not available at ' \
-                        '`%s/perftest\'' % config['builddir'])
-    else:
-      output = {}
-      stamp_begin = utils.get_time()
-      rt1_results = self.pass_perftest_helper(config, slave_name, 0)
-      output['rt1'] = (stamp_begin, utils.get_time(), rt1_results)
-      if config['rt2']:
-        stamp_begin = utils.get_time()
-        rt2_results = self.pass_perftest_helper(config, slave_name, 1)
-        output['rt2'] = (stamp_begin, utils.get_time(), rt2_results)
-      self.publisher.perftest_out(config, slave_name, output)
-    utils.run_cmd('/bin/rm -rf %s/perftest*' % config['builddir'], None, 1800)
-
-  def pass_functest_helper(self, config, slave_name, runtime):
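-    """ Run the function test suites (BER/RAW/TEXT encoder-decoder tests,
-        configuration file parser and semantic analyser tests) through their
-        `run_test'/`run_test_all' scripts and collect the logs under the
-        slave's log directory.
-
-        Arguments:
-          config: The current build configuration.
-          slave_name: The name of the slave.
-          runtime: 0 for the load-test run-time, 1 for the function-test
-                   runtime.
-    """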
-    function_test_prefix = '%s/TTCNv3/function_test' % config['builddir']
-    function_test_prefixes = ('%s/BER_EncDec' % function_test_prefix, \
-                              '%s/Config_Parser' % function_test_prefix, \
-                              '%s/RAW_EncDec' % function_test_prefix, \
-                              '%s/Semantic_Analyser' % function_test_prefix, \
-                              '%s/Text_EncDec' % function_test_prefix, \
-                              '%s/Semantic_Analyser/float' % function_test_prefix, \
-                              '%s/Semantic_Analyser/import_of_iports' % function_test_prefix, \
-                              '%s/Semantic_Analyser/options' % function_test_prefix, \
-                              '%s/Semantic_Analyser/ver' % function_test_prefix, \
-                              '%s/Semantic_Analyser/xer' % function_test_prefix)
-    log_dir = os.path.join(config['logdir'], slave_name)
-    functest_out = {}
-    stamp_old = utils.get_time()
-    for function_test in function_test_prefixes:
-      utils.run_cmd('ln -s %s %s' % (config['perl'], function_test))
-      function_test_name = function_test.split('/')[-1]
-      ber_or_raw_or_text = not (function_test_name == 'Config_Parser' or function_test_name == 'Semantic_Analyser')
-      utils.run_cmd('cd %s && %s ./%s %s 2>&1 | tee %s/functest-%s.%s' \
-                    % (function_test, (runtime and 'RT2=1' or ''),
-                       (os.path.isfile('%s/run_test_all' % function_test) \
-                                      and 'run_test_all' or 'run_test'), \
-                       ((runtime and not ber_or_raw_or_text) and '-rt2' or ''), \
-                       log_dir, function_test_name, \
-                       (runtime and 'rt2' or 'rt1')), None, 3600)
-      error_target = os.path.join(log_dir, 'functest-%s-error.%s' % (function_test_name, (runtime and 'rt2' or 'rt1')))  
-      if ber_or_raw_or_text:
-        utils.run_cmd('cp %s/%s_TD.script_error %s' \
-                      % (function_test, function_test_name, error_target))
-        utils.run_cmd('cp %s/%s_TD.fast_script_error %s' \
-                      % (function_test, function_test_name, error_target))
-      functest_out[function_test_name] = \
-        ('%s/functest-%s.%s' % (log_dir, function_test_name, (runtime and 'rt2' or 'rt1')), \
-         (ber_or_raw_or_text and error_target or ''))
-    return functest_out
-
-  def pass_functest(self, config, slave_name):
-    """ Build pass to build and run the function tests.  The
-        `pass_functest_helper()' does the dirty job.
-    """
-    output = {}
-    stamp_begin = utils.get_time()
-    rt1_results = self.pass_functest_helper(config, slave_name, 0)
-    output['rt1'] = (stamp_begin, utils.get_time(), rt1_results)
-    if config['rt2']:
-      stamp_begin = utils.get_time()
-      rt2_results = self.pass_functest_helper(config, slave_name, 1)
-      output['rt2'] = (stamp_begin, utils.get_time(), rt2_results)
-    self.publisher.functest_out(config, slave_name, output)
-
-  def pass_vobtest(self, config, slave_name):
-    """ Build pass for the VOB products.  Currently, the VOB products are
-        compiled only due to the lack of usable tests written for them.  The
-        output is stored here by the publisher.  The normal runtime should
-        always be the first, it's a restriction of the publisher.
-
-        Arguments:
-          The actual build configuration and its name.
-    """
-    utils.run_cmd('bunzip2 %s/vobtest-*.tar.bz2' \
-                  % config['builddir'], None, 1800)
-    utils.run_cmd('cd %s && tar xf ./vobtest-*.tar && bzip2 ./vobtest-*.tar' \
-                  % config['builddir'], None, 1800)
-    if not os.path.isdir('%s/vobtest' % config['builddir']):
-      self.logger.error('The products are not available at `%s/vobtest\'' \
-                        % config['builddir'])
-      self.publisher.vobtest_out(utils.get_time(), utils.get_time(), {})
-    else:
-      output = {}
-      stamp_begin = utils.get_time()
-      handler = product_handler.product_handler(self.logger, self.config)
-      log_dir = '%s/%s/products' % (config['logdir'], slave_name)
-      results = handler.build_products('%s/vobtest' % config['builddir'], \
-                                       log_dir, config, False)
-      output['rt1'] = (stamp_begin, utils.get_time(), results)
-      if config['rt2']:
-        stamp_begin = utils.get_time()
-        results = handler.build_products('%s/vobtest' \
-                                         % config['builddir'], log_dir, config, True)
-        output['rt2'] = (stamp_begin, utils.get_time(), results)
-      self.publisher.vobtest_out(config, slave_name, output)
-    utils.run_cmd('/bin/rm -rf %s/vobtest*' % config['builddir'], None, 1800)
-
-  def config_titan(self, config, build_dir):
-    """ Modify TITAN configuration files to create a platform-specific source
-        package.  The original files are always preserved in an `*.orig' file.
-        `sed' would be shorter, but this way everything is under control.
-        Improve file handling.  It is assumed that there's a `TTCNv3' directory
-        in the build directory.
-
-        Arguments:
-          config: The build configuration we're configuring for.
-
-        Returns:
-          If everything went fine 0 is returned.  Otherwise 1 is returned and
-          the error messages will be logged.  The screen always stays intact.
-    """
-    if not os.path.isdir('%s/TTCNv3' % build_dir):
-      self.logger.error('The `%s/TTCNv3\' directory is not found' % build_dir)
-      return 1  # It's a fatal error, no way out.
-    # Prepare all function tests.  Add links to the `perl' interpreter and
-    # modify some Makefiles containing the platform string.
-    if config['functest']:
-      function_test_prefix = '%s/TTCNv3/function_test' % build_dir
-      for function_test in ('%s/BER_EncDec' % function_test_prefix, \
-                            '%s/Config_Parser' % function_test_prefix, \
-                            '%s/RAW_EncDec' % function_test_prefix, \
-                            '%s/Semantic_Analyser' % function_test_prefix, \
-                            '%s/Text_EncDec' % function_test_prefix):
-        if os.path.isdir(function_test):
-          if function_test.endswith('BER_EncDec') or \
-             function_test.endswith('RAW_EncDec') or \
-             function_test.endswith('Text_EncDec'):
-            utils.run_cmd('mv %s/Makefile %s/Makefile.orig' \
-                          % (function_test, function_test))
-            berrawtext_makefile = open('%s/Makefile.orig' % function_test, 'rt')
-            berrawtext_makefile_new = open('%s/Makefile' % function_test, 'wt')
-            for line in berrawtext_makefile:
-              if re.match('^PLATFORM\s*:?=\s*\w+$', line):
-                # Platform autodetection could come later; for now the
-                # platform is hard-coded into the build configuration.
-                berrawtext_makefile_new.write('PLATFORM = %s\n' % config['platform'])
-              elif re.match('^CXX\s*:?=\s*.*$', line) and ('cxx' in config and len(config['cxx']) > 0):
-                berrawtext_makefile_new.write('CXX = %s\n' % config['cxx'])
-              else:
-                berrawtext_makefile_new.write(line)
-            berrawtext_makefile.close()
-            berrawtext_makefile_new.close()
-            if function_test.endswith('BER_EncDec'):
-              utils.run_cmd('mv %s/run_test %s/run_test.orig' \
-                            % (function_test, function_test))
-              utils.run_cmd('cat %s/run_test.orig | ' \
-                            'sed s/TD.script/TD.fast_script/ >%s/run_test ' \
-                            '&& chmod 755 %s/run_test' \
-                            % (function_test, function_test, function_test))  # Make it fast.
-        else:
-          self.logger.warning('Function test directory `%s\' is not found'
-                              % function_test)
-    # Add `-lncurses' for all `LINUX' targets.  It's not always needed, hence
-    # platform autodetection won't help here.
-    if config['platform'] == 'LINUX':
-      mctr_makefile_name = '%s/TTCNv3/mctr2/mctr/Makefile' % build_dir
-      if os.path.isfile(mctr_makefile_name):
-        utils.run_cmd('mv %s %s.orig' % (mctr_makefile_name, mctr_makefile_name))
-        mctr_makefile = open('%s.orig' % mctr_makefile_name, 'rt')
-        mctr_makefile_new = open(mctr_makefile_name, 'wt')
-        for line in mctr_makefile:
-          if re.match('^LINUX_CLI_LIBS\s*:?=\s*$', line):
-            mctr_makefile_new.write('LINUX_CLI_LIBS := -lncurses\n')
-          else:
-            mctr_makefile_new.write(line)
-        mctr_makefile.close()
-        mctr_makefile_new.close()
-      else:
-        self.logger.warning('The `%s\' is not found' % mctr_makefile_name)
-    # Prepare the main configuration file.
-    makefile_cfg_name = '%s/TTCNv3/Makefile.cfg' % build_dir
-    if os.path.isfile(makefile_cfg_name):
-      utils.run_cmd('mv %s %s.orig' % (makefile_cfg_name, makefile_cfg_name))
-      makefile_cfg = open('%s.orig' % makefile_cfg_name, 'rt')
-      makefile_cfg_new = open(makefile_cfg_name, 'wt')
-      for line in makefile_cfg:
-        if re.match('^TTCN3_DIR\s*:?=\s*.*$', line):
-          # Use the environment.
-          continue
-        elif re.match('^DEBUG\s*:?=\s*.*$', line):
-          makefile_cfg_new.write('DEBUG := %s\n' % (config['debug'] and 'yes' or 'no'))
-        elif re.match('^# PLATFORM\s*:?=\s*.*$', line) and len(config['platform']) > 0:
-          # Automatic platform detection doesn't seem to work very well, so the
-          # platform should always be set explicitly.
-          makefile_cfg_new.write('PLATFORM := %s\n' % config['platform'])
-        elif re.match('^JNI\s*:?=\s*.*$', line):
-          # It's the so called `and-or' trick from http://diveintopython.org/
-          # power_of_introspection/and_or.html.
-          makefile_cfg_new.write('JNI := %s\n' % (config['jni'] and 'yes' or 'no'))
-        elif re.match('^GUI\s*:?=\s*.*$', line):
-          makefile_cfg_new.write('GUI := %s\n' % (config['gui'] and 'yes' or 'no'))
-        elif re.match('^FLEX\s*:?=\s*.*$', line) and len(config['flex']) > 0:
-          makefile_cfg_new.write('FLEX := %s\n' % config['flex'])
-        elif re.match('^BISON\s*:?=\s*.*$', line) and len(config['bison']) > 0:
-          makefile_cfg_new.write('BISON := %s\n' % config['bison'])
-        elif re.match('^CC\s*:?=\s*.*$', line) and len(config['gccdir']) > 0:
-          makefile_cfg_new.write('CC := %s/bin/%s\n' % (config['gccdir'], (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc')))
-        elif re.match('^CXX\s*:?=\s*.*$', line) and len(config['gccdir']) > 0:
-          makefile_cfg_new.write('CXX := %s/bin/%s\n' % (config['gccdir'], (('cxx' in config and len(config['cxx']) > 0) and config['cxx'] or 'g++')))
-        elif re.match('^JDKDIR\s*:?=\s*.*$', line) and len(config['jdkdir']) > 0:
-          makefile_cfg_new.write('JDKDIR := %s\n' % config['jdkdir'])
-        elif re.match('^QTDIR\s*:?=\s*.*$', line) and len(config['qtdir']) > 0:
-          makefile_cfg_new.write('QTDIR = %s\n' % config['qtdir'])
-        elif re.match('^XMLDIR\s*:?=\s*.*$', line) and len(config['xmldir']) > 0:
-          makefile_cfg_new.write('XMLDIR = %s\n' % config['xmldir'])
-        elif re.match('^OPENSSL_DIR\s*:?=\s*.*$', line) and len(config['openssldir']) > 0:
-          makefile_cfg_new.write('OPENSSL_DIR = %s\n' % config['openssldir'])
-        elif re.match('^LDFLAGS\s*:?=\s*.*$', line) and len(config['ldflags']) > 0:
-          makefile_cfg_new.write('LDFLAGS = %s\n' % config['ldflags'])
-        elif re.match('^COMPILERFLAGS\s*:?=\s*.*$', line) and len(config['compilerflags']) > 0:
-          makefile_cfg_new.write('COMPILERFLAGS = %s\n' % config['compilerflags'])
-        else:
-          makefile_cfg_new.write(line)
-      makefile_cfg.close()
-      makefile_cfg_new.close()
-    else:
-      self.logger.error('The `%s\' is not found, it seems to be a fake ' \
-                        'installation' % makefile_cfg_name)
-      return 1  # It's essential, exit immediately.
-    if config['regtest']:
-      regtest_makefile_name = '%s/TTCNv3/regression_test/Makefile.regression' % build_dir
-      if os.path.isfile(regtest_makefile_name):
-        utils.run_cmd('mv %s %s.orig' \
-                      % (regtest_makefile_name, regtest_makefile_name))
-        regtest_makefile = open('%s.orig' % regtest_makefile_name, 'rt')
-        regtest_makefile_new = open(regtest_makefile_name, 'wt')
-        for line in regtest_makefile:
-          if re.match('^TTCN3_DIR\s*:?=\s*.*$', line):
-            # Use the environment.
-            continue
-          elif re.match('^CC\s*:?=\s*.*$', line) and len(config['gccdir']) > 0:
-            regtest_makefile_new.write('CC := %s/bin/%s\n' % (config['gccdir'], (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc')))
-          elif re.match('^CXX\s*:?=\s*.*$', line) and len(config['gccdir']) > 0:
-            regtest_makefile_new.write('CXX := %s/bin/%s\n' % (config['gccdir'], (('cxx' in config and len(config['cxx']) > 0) and config['cxx'] or 'g++')))
-          elif re.match('^XMLDIR\s*:?=\s*.*$', line) and len(config['xmldir']) > 0:
-            regtest_makefile_new.write('XMLDIR = %s\n' % config['xmldir'])            
-          else:
-            regtest_makefile_new.write(line)
-        regtest_makefile.close()
-        regtest_makefile_new.close()
-      else:
-        self.logger.warning('Regression test configuration file `%s\' is ' \
-                            'not found' % regtest_makefile_name)
-      if 'xsdtests' in config and not config['xsdtests']:
-        self.logger.warning('Disabling `xsd2ttcn\' tests to save some time')
-        utils.run_cmd('mv %s %s.orig' \
-                      % (regtest_makefile_name.split('.')[0], \
-                      regtest_makefile_name.split('.')[0]))
-        utils.run_cmd('cat %s.orig | sed s/\'xsdConverter\'/\'\'/ >%s' \
-                      % (regtest_makefile_name.split('.')[0], \
-                      regtest_makefile_name.split('.')[0]))
-    self.config_pdfs(config, build_dir)
-    self.logger.debug('`%s/TTCNv3\' was configured and ready to build, all ' \
-                      'Makefiles were modified successfully' % build_dir)
-    return 0  # Allow the caller to catch errors.  Use exceptions later
-    # instead.  Only `./TTCNv3' and `./TTCNv3/Makefile.cfg' are necessary for a
-    # successful configuration.  Other Makefiles can be missing.
-
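-  # A minimal sketch of the `and-or' idiom used throughout `config_titan()'
-  # when rewriting the Makefiles:
-  #
-  #   config['debug'] and 'yes' or 'no'  ->  'yes' when the flag is set,
-  #                                          'no' otherwise
-  #
-  # It only works because 'yes' is a truthy value; see the diveintopython
-  # link referenced above.
-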
-  def config_pdfs(self, config, build_dir):
-    """ Optionally, copy .pdf files to the documentation directory or create
-        fake .pdf files to make the installation successful.  If the build
-        configuration doesn't have the appropriate key nothing is done with
-        .pdf files.  If the directory of .pdf files doesn't exist the .pdf
-        files will be faked.
-
-        Arguments:
-          The actual build configuration.
-    """
-    if 'pdfdir' in config:
-      if not os.path.isdir(config['pdfdir']):
-        self.logger.debug('Creating fake .pdf files in %s/TTCNv3/usrguide' % build_dir)
-        for file in os.listdir('%s/TTCNv3/usrguide' % build_dir):
-          if file.endswith('.doc'):
-            utils.run_cmd('touch %s/TTCNv3/usrguide/%s.pdf' \
-                          % (build_dir, file.split('.')[0]))
-      else:
-        self.logger.debug('Copying .pdf files from %s' % config['pdfdir'])
-        utils.run_cmd('cp %s/*.pdf %s/TTCNv3/usrguide' % (config['pdfdir'], build_dir))
-    else:
-      self.logger.debug('The .pdf files are not in place, your ' \
-                        'installation will fail if you haven\'t fixed the ' \
-                        'Makefile...')
-
-  def dump_addressees(self):
-    for addressee in self.config.recipients:
-      print('%s %s' % (addressee, self.config.recipients[addressee]))
-
-  def dump_configs(self):
-    configs = self.config.configs
-    slaves = self.config.slaves
-    for config_name, config_data in configs.iteritems():
-      slave_list = []
-      for slave_name, slave_data in slaves.iteritems():
-        if config_name in slave_data['configs']:
-          slave_list.append(slave_name)
-      print('%s %s' % (config_name, ', '.join(slave_list)))
-
-def main(argv = None):
-  if argv is None:
-    argv = sys.argv
-  usage = 'Usage: %prog [options]'
-  version = '%prog 0.0.5'
-  parser = optparse.OptionParser(usage = usage, version = version)
-  parser.add_option('-a', '--addressees', action = 'store_true', dest = 'addressees', help = 'dump all addressees')
-  parser.add_option('-A', '--set-addressees', action = 'store', type = 'string', dest = 'set_addressees', help = 'set addressees from command line')
-  parser.add_option('-c', '--config-list', action = 'store', type = 'string', dest = 'config_list', help = 'list of build configurations')
-  parser.add_option('-d', '--dump', action = 'store_true', dest = 'dump', help = 'dump build configurations and the attached slaves', default = False)
-  parser.add_option('-p', '--source-path', action = 'store', type = 'string', dest = 'source_path', help = 'instead of CVS use the given path')
-  parser.add_option('-r', '--reset', action = 'store_true', dest = 'reset', help = 'reset statistics', default = False)
-  parser.add_option('-s', '--slave-mode', action = 'store', type = 'string', dest = 'slave_mode', help = 'enable slave mode', default = None)
-  parser.add_option('-t', '--tests', action = 'store', type = 'string', dest = 'tests', help = 'tests to run') 
-  (options, args) = parser.parse_args()
-  # The slaves are always executing a specific build configuration.
-  if not options.config_list and options.slave_mode:
-    parser.print_help()
-  elif options.addressees:
-    titan_builder().dump_addressees()
-  elif options.dump:
-    titan_builder().dump_configs()
-  else:
-    builder = titan_builder()
-    builder.build(options.config_list, options.slave_mode, options.reset, options.set_addressees, options.tests, options.source_path)
-  return 0
-
-if __name__ == '__main__':
-  ret_val = 1
-  try:
-    ret_val = main()
-  except SystemExit, e:
-    ret_val = e.code  # Propagate the real exit status, not the exception object.
-  except:
-    print('Exception caught, writing traceback info to log file `%s\'' \
-          % TRACE_FILENAME)
-    traceback.print_exc(file = open(TRACE_FILENAME, 'at'))
-    sys.exit(1)  # Don't fall through.
-  sys.exit(ret_val)
diff --git a/etc/autotest/titan_builder.sh b/etc/autotest/titan_builder.sh
deleted file mode 100755
index 6d1ddeb82..000000000
--- a/etc/autotest/titan_builder.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash -x
-###############################################################################
-# Copyright (c) 2000-2017 Ericsson Telecom AB
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Contributors:
-#   
-#   Balasko, Jeno
-#   Kovacs, Ferenc
-#
-###############################################################################
-
-
-# Load platform specific settings.
-CONFIG_FILE=${HOME}/.titan_builder
-if [ -f ${CONFIG_FILE} ]; then . ${CONFIG_FILE}; fi
-
-if [ ! -n "`mount | grep \"/view/eferkov_tcc/vobs/ttcn\"`" ]; then sshfs -o ro,reconnect,transform_symlinks titanrt@147.214.15.153:/view/eferkov_tcc/vobs/ttcn /home/titanrt/titan_nightly_builds/vobs/ttcn; fi
-if [ ! -n "`mount | grep \"/proj/TTCN/www/ttcn/root/titan-testresults\"`" ]; then sshfs -o reconnect,transform_symlinks titanrt@147.214.15.96:/proj/TTCN/www/ttcn/root/titan-testresults /home/titanrt/titan_nightly_builds/web; fi
-
-./titan_builder.py "$@"
diff --git a/etc/autotest/titan_builder_cfg.py b/etc/autotest/titan_builder_cfg.py
deleted file mode 100755
index 2478efe17..000000000
--- a/etc/autotest/titan_builder_cfg.py
+++ /dev/null
@@ -1,793 +0,0 @@
-##############################################################################
-# Copyright (c) 2000-2017 Ericsson Telecom AB
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Contributors:
-#   
-#   Balasko, Jeno
-#   Beres, Szabolcs
-#   Kovacs, Ferenc
-#   Raduly, Csaba
-#
-##############################################################################
-#!/usr/bin/env python
-
-import os
-
-USER = os.environ.get('USER', 'titanrt')
-BASEDIR = os.environ.get('HOME', '/home/%s' % USER)
-
-# Sending notifications and the generation of HTML is always done.  It's not a
-# configurable option.
-#
-# Brief description of options:
-#
-#   builddir=['...'|''] The build directory of the master.
-#   logdir=['...'|'']  The logs of the master go here.
-#   htmldir=['...'|''] All HTML files will be published here.
-#   vob=['...'|'']     The VOB products will be copied from here.
-#   archive=[#]        Archive the logs after a specified number of days.
-#   cleanup=[#]        Move the logs to a safe place after the given number of days.
-#   measureperiod=[#]  Reset scores after a given number of days.
-#
-# It's important to use different directories for the master and the slaves.
-# Especially for local builds.
-
-common = {
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_master'),
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_master/logs'),
-  'htmldir':os.path.join(BASEDIR, 'titan_nightly_builds/web/titan_builds'),
-  'vob':os.path.join(BASEDIR, 'titan_nightly_builds/vobs/ttcn/TCC_Common'),
-  'archive':4,
-  'cleanup':8,
-  'cleanupslave':{'slave':'tcclab1', 'dir':os.path.join(BASEDIR, 'titan_nightly_builds/archives')},
-  'measureperiod':30
-}
-
-###############################
-# Description of recipients.  #
-###############################
-
-recipients = {
-  'Adam Delic':'<adam.delic@ericsson.com>',
-  'Csaba Raduly':'<csaba.raduly@ericsson.com>',
-  'Elemer Lelik':'<elemer.lelik@ericsson.com>',
-  'Gabor Szalai':'<gabor.szalai@ericsson.com>',
-  'Gyorgy Rethy':'<gyorgy.rethy@ericsson.com>',
-  'Gyula Koos':'<gyula.koos@ericsson.com>',
-  'Jeno Balasko':'<jeno.balasko@ericsson.com>',
-  'Kristof Szabados':'<kristof.szabados@ericsson.com>',
-  'Krisztian Pandi':'<krisztian.pandi@ericsson.com>',
-  'Matyas Ormandi':'<matyas.ormandi@ericsson.com>',
-  'Szabolcs Beres':'<szabolcs.beres@ericsson.com>',
-  'Tibor Csondes':'<tibor.csondes@ericsson.com>',
-  'Zsolt Szego':'<zsolt.szego@ericsson.com>',
-  'a':'<bcdefghijklmnopqrstuvwxyz>'
-}
-
-###########################
-# Description of slaves.  #
-###########################
-# The whole script will be copied to the target machine and the specified
-# build configuration will be run.  To disable a slave simply comment it out.
-# The password-less `ssh' is a requirement here.  The scheduling is done by
-# `cron'.  If the slave is unreachable it will be skipped due to a timeout.
-# Everything will be written to a logfile.
-#
-# Currently, one configuration is supported for each slave.
-#
-# A new directory will be created by the master with a name specified in the
-# given build configuration.  The build will run from that very directory on
-# the slave.
-
-slaves = {}
-
-slaves['tcclab1'] = {'ip':os.environ.get('TCCLAB1_IP', '172.31.21.7'), 'user':USER, 'configs':['x86_64_linux_tcclab1', 'vobtests_on_x86_64_linux_tcclab1']}
-slaves['tcclab2'] = {'ip':os.environ.get('TCCLAB2_IP', '172.31.21.49'), 'user':USER, 'configs':['x86_64_linux_tcclab2']}
-slaves['tcclab3'] = {'ip':os.environ.get('TCCLAB3_IP', '172.31.21.8'), 'user':USER, 'configs':['x86_64_linux_tcclab3', 'x86_64_linux_tcclab3_your_last_chance']}
-slaves['tcclab4'] = {'ip':os.environ.get('TCCLAB4_IP', '172.31.21.10'), 'user':USER, 'configs':['x86_64_linux_tcclab4']}
-slaves['tcclab5'] = {'ip':os.environ.get('TCCLAB5_IP', '172.31.21.9'), 'user':USER, 'configs':['x86_64_linux_tcclab5', 'x86_64_linux_tcclab5_clang']}
-slaves['mwlx122'] = {'ip':os.environ.get('MWLX122_IP', '159.107.148.32'), 'user':USER, 'configs':[]}
-slaves['esekits3013'] = {'ip':os.environ.get('ESEKITS3013_IP', '147.214.15.172'), 'user':USER, 'configs':['sparc_solaris_esekits3013', 'vobtests_on_sparc_solaris_esekits3013']}
-slaves['esekilxxen1843'] = {'ip':os.environ.get('ESEKILXXEN1843_IP', '147.214.13.100'), 'user':USER, 'configs':['x86_64_linux_esekilxxen1843']}
-slaves['mwux054'] = {'ip':os.environ.get('MWUX054_IP', '159.107.194.67'), 'user':USER, 'configs':[]}
-slaves['bangjohansen'] = {'ip':os.environ.get('BANGJOHANSEN_IP', '172.31.21.76'), 'user':USER, 'configs':['i386_solaris_bangjohansen']}
-
-#############################
-# Description of products.  #
-#############################
-# The list of VOB-products to run.  The `product' is coming from the directory
-# name of the product in CC.  The columns are describing the following in
-# order (the `run' is very unrealistic at the moment):
-#   `semantic': Run semantic checks only on all source files.
-#   `translate': Code generation as well.
-#   `compile': The generated code is compiled and the executable is created.
-#   `run': Try to run the test in some way.
-# False/True = disable/enable a specific phase.  All interesting products
-# should be listed here.
-# Possible scenarios:
-# 1) We look for a `demo' directory under `product'.  If it doesn't exist most
-#    of the phases will fail.  Otherwise go to 2).
-# 2) Find the necessary files and try to reconstruct the product using a build
-#    file or simply the list of files.  The .prj file always has
-#    top priority.  If it's not present, the Makefile is examined.
-#    Otherwise, the files in `demo' and `src' will be copied and a Makefile
-#    will be generated for them.  (If there's a Makefile it will be regenerated
-#    too.)  A new Makefile needs to be generated in all cases.
-# 3) The files are in our ${HOME}, they're ready to be distributed.
-# 4) Run the phases specified in this file.
-#
-# Using a list of products is better than having an exception list.  In this
-# way everything is under control.  For products with .ttcnpp/.ttcnin files,
-# the `semantic' and `translate' passes are disabled.  They don't really make
-# sense without a Makefile, which calls `cpp' first.  Support for automatic
-# detection of preprocessable files is missing.
-
-products = {}
-
-products['TestPorts'] = [
-  {'name':'AGTproc_CNL113391',         'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'ASISmsg_CNL113338',         'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'CPDEBUG_OIPmsg_CNL113381',  'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'CPDEBUG_PLEXmsg_CNL113324', 'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'CPDEBUG_RESmsg_CNL113339',  'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'DIAMETERmsg_CNL113310',     'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'EINMGRasp_CNL113468',       'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'EPmsg_CNL113406',           'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'GMLOGmsg_CNL113351',        'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'GiGnREPLAYasp_CNL113604',   'semantic':False, 'translate':False, 'compile':False, 'run':False},
-  {'name':'H225v5msg_CNL113486',       'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'HTTPmsg_CNL113312',         'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'IPL4asp_CNL113531',         'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'LANL2asp_CNL113519',        'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'LAPDasp_Q.921_CNL113436',   'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'LDAPasp_RFC4511_CNL113513', 'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'LDAPmsg_CNL113385',         'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'LLCasp_CNL113343',          'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'LOADMEASasp_CNL113585',     'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'M2PAasp_CNL113557',         'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'MMLasp_CNL113490',          'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'MTP3asp_CNL113337',         'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'MTP3asp_EIN_CNL113421',     'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'NSasp_CNL113386',           'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'PCAPasp_CNL113443',         'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'PIPEasp_CNL113334',         'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'RLC_RNC_host',              'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'RPMOmsg_CNL113350',         'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'Rexec_Rshmsg_CNL113476',    'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'SCCPasp_CNL113348',         'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'SCTPasp_CNL113469',         'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'SCTPproc_CNL113409',        'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'SEA_IPasp_CNL113544',       'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'SEA_NWMasp_CNL113586',      'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'SEA_OCPasp_CNL113612',      'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'SIPmsg_CNL113319',          'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'SMPPmsg_CNL113321',         'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'SNMPmsg_CNL113344',         'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'SSHCLIENTasp_CNL113484',    'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'SSHSERVERasp_CNL113489',    'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'STDINOUTmsg_CNL113642',     'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'SUAasp_CNL113516',          'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'SUNRPCasp_CNL113493',       'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'TCAPasp_CNL113349',         'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'TCPasp_CNL113347',          'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'TELNETasp_CNL113320',       'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'UDPasp_CNL113346',          'semantic':True,  'translate':True,  'compile':True,  'run':False},
-  {'name':'XTDPasp_CNL113494',         'semantic':True,  'translate':True,  'compile':False, 'run':False}
-]
-
-products['ProtocolModules'] = [
-  {'name':'ABM_RealTime_3.0_CNL113524',      'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'ACR_v1.1_CNL113680',              'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'ADMS_UPG_CNL113555',              'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'AIN_v2.0_CNL113556',              'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BICC_ANSI_CNL113397',             'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BICC_Brazil_CNL113403',           'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BICC_China_CDMA_CNL113441',       'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BICC_China_CNL113402',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BICC_Q.1902.1_CNL113359',         'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BICC_TTC_CNL113416',              'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BICC_UK_CNL113401',               'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BNSI_CNL113475',                  'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BSSAPP_v5.4.0_CNL113373',         'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BSSAPP_v5.5.0_CNL113470',         'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BSSGP_v5.9.0_CNL113388',          'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BSSGP_v6.12.0_CNL113497',         'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BSSGP_v6.7.0_CNL113445',          'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BSSMAP_v4.8.0_CNL113413',         'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'BSSMAP_v6.3.0_CNL113361',         'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CAI3G1.1_UPG1.0_CNL113549',       'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'CAI3G1.2_SP_UP_EMA5.0_CNL113551', 'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'CAI3G_CNL113423',                 'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'CAI3G_v1.1_CNL113511',            'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'CAI_CNL113422',                   'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CAI_CNL113502',                   'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CAI_CNL113504',                   'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CAI_MINSAT_CNL113548',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CAP_v5.4.0_CNL113374',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CAP_v5.6.1_CNL113510',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CAP_v610_CNL113358',              'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CAP_v6.4.0_CNL113433',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CAP_v7.3.0_CNL113581',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CCAPI_MINSAT_531_CNL113546',      'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CDR_v6.1.0_CNL113505',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CDR_v8.5.0_CNL113665',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CIP_CS3.0Y_CNL113506',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'CIP_CS4.0_CNL113535',             'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'DASS2_CNL113464',                 'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'DHCP_CNL113461',                  'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'DNS_CNL113429',                   'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'DPNSS_CNL113455',                 'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'DSS1_ANSI_CNL113481',             'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'DUA_CNL113449',                   'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'EricssonRTC_CNL113414',           'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'Ericsson_INAP_CS1plus_CNL113356', 'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'GCP_31r1_CNL113364',              'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'GMLOG_CNL113408',                 'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'GTP97_v6.11.0_CNL113379',         'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'GTPP_v6.0.0_CNL113448',           'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'GTPP_v7.7.0_CNL113376',           'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'GTP_v5.6.0_CNL113375',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'GTP_v6.11.0_CNL113499',           'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'GTP_v6.7.0_CNL113446',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'GTP_v7.6.0_CNL113559',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'H225.0_v0298_CNL113354',          'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'H225.0_v10_CNL113479',            'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'H245_v5_CNL113480',               'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'H248_v2_CNL113424',               'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'ICMP_CNL113529',                  'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'IP_CNL113418',                    'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'ISUP_ANSI_CNL113411',             'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'ISUP_Brazil_CNL113400',           'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'ISUP_China_CDMA_CNL113442',       'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'ISUP_China_CNL113399',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'ISUP_Q.762_CNL113365',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'ISUP_TTC_CNL113417',              'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'ISUP_UK_CNL113398',               'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'IUA_CNL113439',                   'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'IUP_CNL113554',                   'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'M3UA_CNL113536',                  'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'MAP_v5.6.1_CNL113372',            'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'MAP_v6.11.0_CNL113500',           'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'MAP_v7.12.0_CNL113635',           'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'MobileL3_v5.10.0_CNL113471',      'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'MobileL3_v7.8.0_CNL113561',       'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'RANAP_v6.4.1_CNL113434',          'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'RANAP_v6.8.0_CNL113498',          'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'RANAP_v6.9.0_CNL113527',          'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'ROHC_CNL113426',                  'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'SGsAP_v8.3.0_CNL113668',          'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'SGsAP_v9.0.0_CNL113684',          'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'SOAP_MMS_CNL113518',              'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'STUN_CNL113644',                  'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'TBCP_CNL113463',                  'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'TRH_AXD7.5_CNL113574',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'TRH_CNL113485',                   'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'TRH_TSS4.0_CNL113547',            'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'UDP_CNL113420',                   'semantic':True, 'translate':True, 'compile':True,  'run':False},
-  {'name':'ULP_CNL113457',                   'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'XCAP_CNL113460',                  'semantic':True, 'translate':True, 'compile':False, 'run':False},
-  {'name':'XML_RPC_CNL113488',               'semantic':True, 'translate':True, 'compile':False, 'run':False}
-]
-
-products['ProtocolEmulations'] = [
-  {'name':'M3UA_CNL113537', 'semantic':True,  'translate':True,  'compile':False, 'run':False},
-  {'name':'SCCP_CNL113341', 'semantic':False, 'translate':False, 'compile':True,  'run':False},
-  {'name':'TCAP_CNL113342', 'semantic':True,  'translate':True,  'compile':True,  'run':False}
-]
-
-products['Servers'] = []
-
-products['Libraries'] = []
-
-products['Applications'] = []
-
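-# Illustrative sketch only (not part of the original scripts): roughly how a
-# driver could walk the per-product phase flags defined above.  The
-# `run_phase' callable is a hypothetical hook standing in for the real phase
-# implementations in titan_builder.py.
-def _example_iterate_phases(products, run_phase):
-  for kind, entries in products.items():
-    for product in entries:
-      for phase in ('semantic', 'translate', 'compile', 'run'):
-        if product.get(phase, False):
-          run_phase(kind, product['name'], phase)
-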
-#########################################
-# Description of build configurations.  #
-#########################################
-# The build configurations are slave specific and can be overridden through
-# command line arguments.  Configuration inheritance should be added to avoid
-# redundancy (an illustrative sketch follows the configuration entries below).
-# The build directory stays intact until the next build to help debugging;
-# the next build wipes out everything.
-#
-# Brief description of options:
-#
-#   version=['...'|'']         Version of TITAN to use.  It can be a CVS tag or
-#                              date.  If it's not set, HEAD will be taken.
-#   license=['...'|'']         Location of the license file.
-#   gui=[True|False]           The `GUI' part in Makefile.cfg.
-#   jni=[True|False]           The `JNI' part in Makefile.cfg.
-#   debug=[True|False]         The `DEBUG' part in Makefile.cfg.
-#   compilerflags=['...'|'']   The `COMPILERFLAGS' in Makefile.cfg.
-#   ldflags=['...'|'']         The `LDFLAGS' in Makefile.cfg.
-#   gccdir=['...'|'']          This will affect `CC' and `CXX'.
-#   *cc=['...'|'']             Value of `CC' in synch with the previous option.
-#   *cxx=['...'|'']            Value of `CXX' in synch with the previous option.
-#   qtdir=['...'|'']           For `QTDIR'.
-#   xmldir=['...'|'']          For `XMLDIR'.
-#   openssldir=['...'|'']      For `OPENSSL_DIR'.
-#   flex=['...'|'']            Replace `FLEX'.
-#   perl=['...'|'']            Location of the `perl' interpreter.
-#   bison=['...'|'']           Replace `BISON'.
-#   regtest=[True|False]       Run regression tests.
-#   perftest=[True|False]      Run performance tests.  The location of the
-#                              testsuite must be known, since it's not part of
-#                              CVS.  It should be available for the master
-#                              locally.
-#   perftestdir=['...'|'']     Location of the performance tests.
-#   *cpsmin=[#]                Minimum CPS value for performance tests.
-#   *cpsmax=[#]                Maximum CPS value for performance tests.
-#   functest=[True|False]      Run function tests.
-#   vobtest=[True|False]       Run product tests.
-#   *vobtest_logs=[True|False] Save logs for product tests.
-#   rt2=[True|False]           Run tests with both run-times.
-#   builddir=['...'|'']        Everything will be done here.  It should be
-#                              different from the master's.
-#   installdir=['...'|'']      The `TTCN3_DIR' variable.  It's never removed.
-#   logdir=['...'|'']          Place of the logs.
-#   *pdfdir=['...'|'']         Local directory to copy .pdf files from.  If not
-#                              present, no .pdf files will be included.  If
-#                              it's an empty string, the .pdf files will be
-#                              faked with empty files.
-#   *xsdtests=[True|False]     Enable or disable the regression tests for
-#                              `xsd2ttcn'.  They are very time consuming.
-#   *foa=[True|False]          The builds are left in a directory.
-#   *foadir=['...'|'']         Link location of the latest build for FOA.  If
-#                              not set, its value will be equal to `installdir'.
-#   *measure=[True|False]      Enable `quality' measurements.
-#   *eclipse=[True|False]      Enable Eclipse build.
-#
-# The results will be sent back to the originating machine, the master.  It
-# will assemble the notifications and generate HTML pages.  Make sure that the
-# build and log directories are always unique!
-
-configs = {}
-
-configs['x86_64_linux_esekilxxen1843'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',  # Or date in format `YYYYMMDD'.
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'LINUX',
-  'gui':True,
-  'jni':True,
-  'debug':False,
-  'compilerflags':'$(MINGW) -fPIC',
-  'ldflags':'$(MINGW) -fPIC',
-  'gccdir':'/usr',
-  'flex':'/usr/bin/flex',
-  'bison':'/usr/bin/bison',
-  'jdkdir':'/usr/lib64/jvm/java-1.6.0-sun-1.6.0',
-  'qtdir':'/usr/lib64/qt3',
-  'xmldir':'default',
-  'openssldir':'default',
-  'regtest':True,
-  'perftest':True,
-  'functest':True,
-  'vobtest':False,
-  'vobtest_logs':False,
-  'rt2':True,
-  'xsdtests':True,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/usr/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'cpsmin':2000,
-  'cpsmax':7000,
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/x86_64_linux_esekilxxen1843'),
-  'installdir':'/home/titanrt/TTCNv3-bleedingedge-x86_64_linux_esekilxxen1843',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/x86_64_linux_esekilxxen1843'),
-  'measure':False,
-  'eclipse':True
-}
-
-configs['sparc_solaris_esekits3013'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',  # Or date in format `YYYYMMDD'.
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'SOLARIS8',
-  'gui':True,
-  'jni':True,
-  'debug':False,
-  'compilerflags':'-Wall',
-  'ldflags':'$(MINGW)',
-  'gccdir':'/proj/TTCN/Tools/gcc-3.4.6-sol8',
-  'flex':'/proj/TTCN/Tools/flex-2.5.35/bin/flex',
-  'bison':'/proj/TTCN/Tools/bison-2.4.3/bin/bison',
-  'jdkdir':'/proj/TTCN/Tools/jdk1.6.0_23',
-  'qtdir':'/proj/TTCN/Tools/qt-x11-free-3.3.8-gcc3.4.6-sol8',
-  'xmldir':'/proj/TTCN/Tools/libxml2-2.7.8',
-  'openssldir':'/proj/TTCN/Tools/openssl-0.9.8r',
-  'regtest':True,
-  'perftest':True,
-  'functest':True,
-  'vobtest':False,
-  'vobtest_logs':False,
-  'rt2':True,
-  'xsdtests':True,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/proj/TTCN/Tools/perl-5.10.1/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'cpsmin':1000,
-  'cpsmax':2000,
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/sparc_solaris_esekits3013'),
-  'installdir':'/home/titanrt/TTCNv3-bleedingedge-sparc_solaris_esekits3013',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/sparc_solaris_esekits3013'),
-  'measure':False
-}
-
-configs['x86_64_linux_tcclab1'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'LINUX',
-  'gui':True,
-  'jni':True,
-  'debug':False,
-  # For Linux platforms the `-lncurses' is added automatically to the
-  # appropriate Makefile.  (This should be part of the CVS instead.)
-  'compilerflags':'-Wall -fPIC',
-  'ldflags':'$(MINGW) -fPIC',
-  'gccdir':'/usr',
-  'flex':'/usr/bin/flex',
-  'bison':'/usr/bin/bison',
-  'jdkdir':'/mnt/TTCN/Tools/jdk1.6.0_14',
-  'qtdir':'/usr/lib/qt3',
-  'xmldir':'default',
-  'openssldir':'default',
-  'regtest':True,
-  'perftest':True,
-  'functest':True,
-  'vobtest':False,
-  'vobtest_logs':False,
-  'rt2':True,
-  'xsdtests':True,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/usr/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'cpsmin':4000,
-  'cpsmax':9000,
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/x86_64_linux_tcclab1'),
-  'installdir':'/mnt/TTCN/Releases/TTCNv3-bleedingedge',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/x86_64_linux_tcclab1'),
-  'measure':False
-}
-
-configs['x86_64_linux_tcclab2'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'LINUX',
-  'gui':True,
-  'jni':True,
-  'debug':True,
-  # For Linux platforms the `-lncurses' is added automatically to the
-  # appropriate Makefile.  (This should be part of the CVS instead.)
-  'compilerflags':'-Wall -fPIC',
-  'ldflags':'$(MINGW) -fPIC',
-  'gccdir':'/usr',
-  'flex':'/usr/bin/flex',
-  'bison':'/usr/bin/bison',
-  'jdkdir':'/mnt/TTCN/Tools/jdk1.6.0_14',
-  'qtdir':'/mnt/TTCN/Tools/qt3',
-  'xmldir':'default',
-  'openssldir':'default',
-  'regtest':True,
-  'perftest':True,
-  'functest':True,
-  'vobtest':False,
-  'vobtest_logs':False,
-  'rt2':True,
-  'xsdtests':True,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/usr/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'cpsmin':1000,
-  'cpsmax':6000,
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/x86_64_linux_tcclab2'),
-  'installdir':'/mnt/TTCN/Releases/TTCNv3-bleedingedge',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/x86_64_linux_tcclab2'),
-  'measure':False
-}
-
-configs['x86_64_linux_tcclab3'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'LINUX',
-  'gui':True,
-  'jni':True,
-  'debug':False,
-  # For Linux platforms the `-lncurses' is added automatically to the
-  # appropriate Makefile.  (This should be part of the CVS instead.)
-  'compilerflags':'-Wall -fPIC',
-  'ldflags':'$(MINGW) -fPIC',
-  'gccdir':'/usr',
-  'flex':'/usr/bin/flex',
-  'bison':'/usr/bin/bison',
-  'jdkdir':'/mnt/TTCN/Tools/jdk1.6.0_14',
-  'qtdir':'/usr/lib/qt3',
-  'xmldir':'default',
-  'openssldir':'default',
-  'regtest':True,
-  'perftest':True,
-  'functest':True,
-  'vobtest':False,
-  'vobtest_logs':False,
-  'rt2':True,
-  'xsdtests':True,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/usr/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'cpsmin':1000,
-  'cpsmax':6000,
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/x86_64_linux_tcclab3'),
-  'installdir':'/mnt/TTCN/Releases/TTCNv3-bleedingedge',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/x86_64_linux_tcclab3'),
-  'measure':False
-}
-
-configs['x86_64_linux_tcclab3_your_last_chance'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'LINUX',
-  'gui':True,
-  'jni':True,
-  'debug':False,
-  # For Linux platforms the `-lncurses' is added automatically to the
-  # appropriate Makefile.  (This should be part of the CVS instead.)
-  'compilerflags':'-Wall -fPIC',
-  'ldflags':'$(MINGW) -fPIC',
-  'gccdir':'/usr',
-  'flex':'/usr/bin/flex',
-  'bison':'/usr/bin/bison',
-  'jdkdir':'/mnt/TTCN/Tools/jdk1.6.0_14',
-  'qtdir':'/usr/lib/qt3',
-  'xmldir':'default',
-  'openssldir':'default',
-  'regtest':True,
-  'perftest':True,
-  'functest':True,
-  'vobtest':False,
-  'vobtest_logs':False,
-  'rt2':True,
-  'xsdtests':True,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/usr/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'cpsmin':1000,
-  'cpsmax':6000,
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/x86_64_linux_tcclab3_your_last_chance'),
-  'installdir':'/mnt/TTCN/Releases/TTCNv3-bleedingedge',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/x86_64_linux_tcclab3_your_last_chance'),
-  'measure':False
-}
-
-configs['x86_64_linux_tcclab5_clang'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'LINUX',
-  'gui':True,
-  'jni':True,
-  'debug':False,
-  'compilerflags':'-Wall -fPIC',
-  'ldflags':'$(MINGW) -fPIC',
-  'gccdir':'/local/ecsardu',
-  'cc':'clang',
-  'cxx':'clang++',  # It's just a link.
-  'flex':'/usr/bin/flex',
-  'bison':'/usr/bin/bison',
-  'jdkdir':'/usr/lib64/jvm/java-1.6.0',
-  'qtdir':'/usr/lib64/qt3',
-  'xmldir':'default',
-  'openssldir':'default',
-  'regtest':True,
-  'perftest':True,
-  'functest':True,
-  'vobtest':False,
-  'vobtest_logs':False,
-  'rt2':True,
-  'xsdtests':True,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/usr/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'cpsmin':4000,
-  'cpsmax':9000,
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/x86_64_linux_tcclab5_clang'),
-  'installdir':'/mnt/TTCN/Releases/TTCNv3-bleedingedge-clang',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/x86_64_linux_tcclab5_clang'),
-  'measure':False
-}
-
-configs['x86_64_linux_tcclab4'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'LINUX',
-  'gui':True,
-  'jni':True,
-  'debug':False,
-  'compilerflags':'-Wall -fPIC',
-  'ldflags':'$(MINGW) -fPIC',
-  'gccdir':'/usr',
-  'flex':'/usr/bin/flex',
-  'bison':'/usr/bin/bison',
-  'jdkdir':'/usr/lib/jvm/java-1.6.0-openjdk',
-  'qtdir':'/mnt/TTCN/Tools/qt',
-  'xmldir':'default',
-  'openssldir':'default',
-  'regtest':True,
-  'perftest':True,
-  'functest':True,
-  'vobtest':False,
-  'vobtest_logs':False,
-  'rt2':True,
-  'xsdtests':True,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/usr/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'cpsmin':1000,
-  'cpsmax':6000,
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/x86_64_linux_tcclab4'),
-  'installdir':'/mnt/TTCN/Releases/TTCNv3-bleedingedge',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/x86_64_linux_tcclab4'),
-  'measure':False
-}
-
-configs['x86_64_linux_tcclab5'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'LINUX',
-  'gui':True,
-  'jni':True,
-  'debug':False,
-  'compilerflags':'-Wall -fPIC',
-  'ldflags':'$(MINGW) -fPIC',
-  'gccdir':'/usr',
-  'flex':'/usr/bin/flex',
-  'bison':'/usr/bin/bison',
-  'jdkdir':'/usr/lib64/jvm/java-1.6.0',
-  'qtdir':'/usr/lib64/qt3',
-  'xmldir':'default',
-  'openssldir':'default',
-  'regtest':True,
-  'perftest':True,
-  'functest':True,
-  'vobtest':False,
-  'vobtest_logs':False,
-  'rt2':True,
-  'xsdtests':True,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/usr/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'cpsmin':4000,
-  'cpsmax':9000,
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/x86_64_linux_tcclab5'),
-  'installdir':'/mnt/TTCN/Releases/TTCNv3-bleedingedge',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/x86_64_linux_tcclab5'),
-  'measure':False
-}
-
-# On this platform we need to force `SOLARIS8', otherwise we get:
-# g++ -c -DNDEBUG -I /mnt/TTCN/Tools/libxml2-2.7.1/include/libxml2 -DSOLARIS -DLICENSE -I/mnt/TTCN/Tools/openssl-0.9.8k/include -I../common -DUSES_XML -Wall -Wno-long-long -O2 Communication.cc
-# Communication.cc: In static member function `static void TTCN_Communication::connect_mc()':
-# Communication.cc:242: error: invalid conversion from `int*' to `socklen_t*'
-# Communication.cc:242: error: initializing argument 3 of `int getsockname(int, sockaddr*, socklen_t*)'
-# Communication.cc: In static member function `static boolean TTCN_Communication::increase_send_buffer(int, int&, int&)':
-# Communication.cc:409: error: invalid conversion from `int*' to `socklen_t*'
-configs['i386_solaris_bangjohansen'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'SOLARIS8',
-  'gui':True,
-  'jni':True,
-  'debug':False,
-  'compilerflags':'-Wall',
-  'ldflags':'',
-  'gccdir':'/mnt/TTCN/Tools/gcc-3.4.6-sol10',
-  'flex':'/mnt/TTCN/Tools/flex-2.5.33/bin/flex',
-  'bison':'/mnt/TTCN/Tools/bison-2.3/bin/bison',
-  'jdkdir':'/mnt/TTCN/Tools/jdk1.6.0',
-  'qtdir':'/mnt/TTCN/Tools/qt-x11-free-3.3.8-gcc4.1.1-sol10',
-  'xmldir':'',
-  'openssldir':'',
-  'regtest':True,
-  'perftest':False,
-  'functest':False,
-  'vobtest':False,
-  'vobtest_logs':False,
-  'rt2':True,
-  'xsdtests':True,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/usr/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'cpsmin':1000,
-  'cpsmax':6000,
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/i386_solaris_bangjohansen'),
-  'installdir':'/mnt/TTCN/Releases/TTCNv3-bleedingedge',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/i386_solaris_bangjohansen'),
-  'measure':False
-}
-
-configs['vobtests_on_sparc_solaris_esekits3013'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'SOLARIS8',
-  'gui':False,
-  'jni':False,
-  'debug':False,
-  'compilerflags':'-Wall',
-  'ldflags':'$(MINGW)',
-  'gccdir':'/proj/TTCN/Tools/gcc-3.4.6-sol8',
-  'flex':'/proj/TTCN/Tools/flex-2.5.35/bin/flex',
-  'bison':'/proj/TTCN/Tools/bison-2.4.3/bin/bison',
-  'jdkdir':'/proj/TTCN/Tools/jdk1.6.0_23',
-  'qtdir':'/proj/TTCN/Tools/qt-x11-free-3.3.8-gcc3.4.6-sol8',
-  'xmldir':'/proj/TTCN/Tools/libxml2-2.7.8',
-  'openssldir':'/proj/TTCN/Tools/openssl-0.9.8r',
-  'regtest':False,
-  'perftest':False,
-  'functest':False,
-  'vobtest':True,
-  'vobtest_logs':True,
-  'rt2':True,
-  'xsdtests':False,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/proj/TTCN/Tools/perl-5.10.1/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/vobtests_on_sparc_solaris_esekits3013'),
-  'installdir':'/home/titanrt/TTCNv3-bleedingedge-vobtests_on_sparc_solaris_esekits3013',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/vobtests_on_sparc_solaris_esekits3013'),
-  'measure':False
-}
-
-configs['vobtests_on_x86_64_linux_tcclab1'] = {
-  'foa':False,
-  'foadir':'',
-  'version':'',
-  'license':'/home/titanrt/license_8706.dat',
-  'platform':'LINUX',
-  'gui':False,
-  'jni':False,
-  'debug':False,
-  'compilerflags':'-Wall -fPIC',
-  'ldflags':'$(MINGW) -fPIC',
-  'gccdir':'/usr',
-  'flex':'/usr/bin/flex',
-  'bison':'/usr/bin/bison',
-  'jdkdir':'',
-  'qtdir':'',
-  'xmldir':'default',
-  'openssldir':'default',
-  'regtest':False,
-  'perftest':False,
-  'functest':False,
-  'vobtest':True,
-  'vobtest_logs':True,
-  'rt2':True,
-  'xsdtests':False,
-  'pdfdir':os.path.join(BASEDIR, 'docs/TTCNv3-1.8.pl5'),
-  'perl':'/usr/bin/perl',
-  'perftestdir':os.path.join(BASEDIR, 'titan_nightly_builds/balls/perftest-20090927.tar.bz2'),
-  'builddir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/vobtests_on_x86_64_linux_tcclab1'),
-  'installdir':'/tmp/TTCNv3-bleedingedge-vobs',
-  'logdir':os.path.join(BASEDIR, 'titan_nightly_builds/build_slave/logs/vobtests_on_x86_64_linux_tcclab1'),
-  'measure':False
-}
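-
-# Illustrative sketch only (not part of the original scripts): the
-# configuration inheritance mentioned above could be implemented by deriving
-# a new entry from an existing one and overriding only the differing keys.
-# For example, `x86_64_linux_tcclab2' differs from `x86_64_linux_tcclab1'
-# mainly in `debug', `qtdir', the CPS limits and the build/log directories.
-def _example_derive_config(base, **overrides):
-  derived = dict(base)
-  derived.update(overrides)
-  return derived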
diff --git a/etc/autotest/titan_publisher.py b/etc/autotest/titan_publisher.py
deleted file mode 100755
index d86f42db0..000000000
--- a/etc/autotest/titan_publisher.py
+++ /dev/null
@@ -1,1291 +0,0 @@
-##############################################################################
-# Copyright (c) 2000-2017 Ericsson Telecom AB
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Contributors:
-#   
-#   Balasko, Jeno
-#   Kovacs, Ferenc
-#
-##############################################################################
-import os, re, types, time
-import utils
-
-class titan_publisher:
-  def __init__(self, logger, config):
-    self._logger = logger
-    self._config = config
-    
-    self._plotter = plotter(self._logger, self._config)
-
-    self._platform = None
-    self._titan = None
-    self._regtest = None
-    self._perftest = None
-    self._eclipse = None
-    self._functest = None
-    self._vobtest = None
-
-  def __str__(self):
-    return self.as_text()
-
-  def titan_out(self, config, slave_name, titan_out):
-    """ Write TITAN results to file.  """
-    if not self._titan:
-      self._titan = titan_out
-      if not self._titan:
-        return
-      log_dir = os.path.join(config.get('logdir', ''), slave_name)
-      (stamp_begin, stamp_end, \
-        ((ret_val_dep, stdout_dep, stderr_dep), \
-         (ret_val_make, stdout_make, stderr_make), \
-         (ret_val_install, stdout_install, stderr_install))) = self._titan
-      file_dep = open('%s/titan.dep' % log_dir, 'wt')
-      file_make = open('%s/titan.make' % log_dir, 'wt')
-      file_install = open('%s/titan.install' % log_dir, 'wt')
-      file_dep.write(''.join(stdout_dep))
-      file_make.write(''.join(stdout_make))
-      file_install.write(''.join(stdout_install))
-      file_dep.close()
-      file_make.close()
-      file_install.close()
-    else:
-      self._logger.error('Multiple TITAN builds are not allowed in the ' \
-                         'build cycle, ignoring the results')
-
-  def regtest_out(self, config, slave_name, regtest_out):
-    """ Write regression test results to file.  """
-    if not self._regtest:
-      self._regtest = regtest_out
-      if not self._regtest:
-        return
-      log_dir = os.path.join(config.get('logdir', ''), slave_name)
-      for rt, rt_data in self._regtest.iteritems():
-        (stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
-          (ret_val_run, stdout_run, stderr_run))) = rt_data
-        file_make = open('%s/regtest-make.%s' % (log_dir, rt), 'wt')
-        file_run = open('%s/regtest-run.%s' % (log_dir, rt), 'wt')
-        file_make.write(''.join(stdout_make))
-        file_run.write(''.join(stdout_run))
-        file_make.close()
-        file_run.close()
-    else:
-      self._logger.error('The regression test results are already set')
-
-  def perftest_out(self, config, slave_name, perftest_out):
-    """ Write performance test results to file.  """
-    if not self._perftest:
-      self._perftest = perftest_out
-      if not self._perftest:
-        return
-      log_dir = os.path.join(config.get('logdir', ''), slave_name)
-      for rt, rt_data in self._perftest.iteritems():
-        (stamp_begin, stamp_end, results) = rt_data
-        (ret_val_make, stdout_make, stderr_make) = results.get('make', ([], [], []))
-        file_make = open('%s/perftest.%s' % (log_dir, rt), 'wt')
-        file_make.write(''.join(stdout_make))
-        file_make.close()
-        for run in results.get('run', []):
-          (cps, (ret_val_run, stdout_run, stderr_run)) = run
-          file_run = open('%s/perftest.%s-%d' % (log_dir, rt, cps), 'wt')
-          file_run.write(''.join(stdout_run))
-          file_run.close()
-    else:
-      self._logger.error('The performance test results are already set')
-
-  def eclipse_out(self, config, slave_name, eclipse_out):
-    if not self._eclipse:
-      self._eclipse = eclipse_out
-    else:
-      self._logger.error('The Eclipse build results are already set')
-
-  def functest_out(self, config, slave_name, functest_out):
-    """ Store function test results for publishing.  """
-    if not self._functest:
-      self._functest = functest_out
-    else:
-      self._logger.error('The function test results are already set')
-
-  def vobtest_out(self, config, slave_name, vobtest_out):
-    """ Store VOB test results for publishing.  """
-    if not self._vobtest:
-      self._vobtest = vobtest_out
-    else:
-      self._logger.error('The VOB product results are already set')
-
-  def dump_csv(self, stamp_old, stamp_new, config, config_name, slave_name):
-    out_file = os.path.join(self._config.configs[config_name]['logdir'], \
-                            os.path.join(slave_name, 'report.csv'))
-    try:  
-      out_csv = open(out_file, 'wt')
-      out_csv.write(self.as_csv(stamp_old, stamp_new, config, config_name, slave_name))
-      out_csv.close()
-    except IOError, (errno, strerror):
-      self._logger.error('Cannot open file `%s\': %d: %s' \
-                         % (out_file, errno, strerror))
-
-  def dump_txt(self, stamp_old, stamp_new, config, config_name, slave_name):
-    out_file = os.path.join(self._config.configs[config_name]['logdir'], \
-                            os.path.join(slave_name, 'report.txt'))
-    try:
-      out_txt = open(out_file, 'wt')
-      out_txt.write(self.as_txt(stamp_old, stamp_new, config, config_name, slave_name))
-      out_txt.close()
-    except IOError, (errno, strerror):
-      self._logger.error('Cannot open file `%s\': %d: %s' \
-                         % (out_file, errno, strerror))
-
-  def dump_html(self, stamp_old, stamp_new, config, config_name, slave_name):
-    out_file = os.path.join(self._config.configs[config_name]['logdir'], \
-                            os.path.join(slave_name, 'report.html'))
-    try:
-      out_html = open(out_file, 'wt')
-      out_html.write(self.as_html(stamp_old, stamp_new, config, config_name, slave_name))
-      out_html.close()
-    except IOError, (errno, strerror):
-      self._logger.error('Cannot open file `%s\': %d: %s' \
-                         % (out_file, errno, strerror))
-
-  def as_csv(self, stamp_begin, stamp_end, config, config_name, slave_name):
-    """ Return a very brief summary of the build.  The used runtimes are not
-        distinguished.  Neither the compile time errors and runtime errors.
-        Take care of the (header-)order when adding new columns.
-
-        Arguments:
-          stamp_begin: Start of the whole build.
-          stamp_end: End of the whole build.
-          config: The actual build configuration.
-          config_name: The name of the actual build configuration.
-          slave_name: The name of the actual slave.  It's defined in the
-                      configuration file.
-
-        Returns:
-          The slave specific results in a brief CSV format suitable for
-          notification e-mails.  The master can easily generate a fancy table
-          from this CSV data.
-    """
-    # `gcc' writes to the standard error.
-    results = []
-    uname_out = utils.run_cmd('uname -srmp')[1]
-    gcc_out = filter(lambda v: v.find(' ver') > 0, utils.run_cmd('%s -v' % (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc'))[2])
-    results.append('%s,%s,%s,%s,%s,%s' \
-                   % (stamp_begin, stamp_end, \
-                   uname_out[0].strip(), gcc_out[0].strip(), \
-                   config_name, slave_name))
-    if self._titan:
-      (stamp_begin, stamp_end, \
-       ((ret_val_dep, stdout_dep, stderr_dep), \
-        (ret_val_make, stdout_make, stderr_make), \
-        (ret_val_install, stdout_install, stderr_install))) = self._titan
-      if ret_val_dep or ret_val_make or ret_val_install:
-        results.append(',1,1,1,1,1,1')
-        return ''.join(results)
-      results.append(',0')
-    else:
-      self._logger.error('The output of TITAN build was not set')
-      results.append(',-1,-1,-1,-1,-1,-1')
-      return ''.join(results)
-    if self._regtest:
-      all_fine = True
-      for rt, rt_data in self._regtest.iteritems():
-        (stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
-          (ret_val_run, stdout_run, stderr_run))) = rt_data
-        if ret_val_make or ret_val_run:
-          all_fine = False
-          break
-      results.append(all_fine and ',0' or ',1')
-    else:
-      results.append(',-1')
-    if self._perftest:
-      all_fine = True
-      for rt, rt_data in self._perftest.iteritems():
-        (stamp_begin, stamp_end, compile_run_data) = rt_data
-        (ret_val_make, stdout_make, stderr_make) = compile_run_data['make']
-        if ret_val_make:
-          all_fine = False
-          break
-        for run_data in compile_run_data['run']:
-          (cps, (ret_val_run, stdout_run, stderr_run)) = run_data
-          if ret_val_run:
-            all_fine = False
-            break
-      results.append(all_fine and ',0' or ',1')
-    else:
-      results.append(',-1')
-    if self._functest:
-      all_fine = True
-      for rt, rt_data in self._functest.iteritems():
-        (stamp_begin, stamp_end, functest_data) = rt_data
-        for test, test_results in functest_data.iteritems():
-          (log_file_name, error_file_name) = test_results
-          satester_report = test == 'Config_Parser' or test == 'Semantic_Analyser'
-          if satester_report:
-            log_file = open(log_file_name, 'rt')
-            log_file_data = log_file.readlines()
-            log_file.close()
-            log_file_data.reverse()
-            total_matched = passed = None
-            for line in log_file_data:
-              if not total_matched:
-                total_matched = re.match('^Total number of.*: (\d+)$', line)
-              if not passed:
-                passed = re.match('\s*PASSED.*cases: (\d+)', line)
-              if total_matched and passed:
-                if int(total_matched.group(1)) != int(passed.group(1)):
-                  all_fine = False
-                  break
-            if not total_matched or not passed:
-              self._logger.error('There\'s something wrong with the ' \
-                                 'function test logs, it\'s treated as an ' \
-                                 'error')
-              all_fine = False
-              break
-          else:
-            if error_file_name and os.path.isfile(error_file_name):
-              error_file = open(error_file_name, 'rt')
-              error_file_data = error_file.readlines()
-              error_file.close()
-              if len(error_file_data) != 0:
-                all_fine = False
-                break
-      results.append(all_fine and ',0' or ',1')
-    else:
-      results.append(',-1')
-    if self._vobtest:
-      # Unfortunately there's no `goto' in Python.  However, returning from
-      # multiple loops can be done using exceptions...
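-      # (E.g. by raising a private exception in the innermost loop and
-      # catching it around the outermost one.  Here a flag plus `break' is
-      # used instead, which only leaves the innermost loop early; the later
-      # iterations are redundant but harmless once `all_fine' is False.)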
-      all_fine = True
-      for rt, rt_data in self._vobtest.iteritems():
-        (stamp_begin, stamp_end, vobtest_data) = rt_data
-        for kind, products in vobtest_data.iteritems():
-          if not len(products) > 0:
-            continue
-          for product in products:
-            for name, name_data in product.iteritems():
-              if not isinstance(name_data, types.DictType):
-                all_fine = False
-                break
-              else:
-                for action, action_data in name_data.iteritems():
-                  if isinstance(action_data, types.TupleType):
-                    (ret_val, output_files, stdout, stderr) = action_data
-                    if ret_val:
-                      all_fine = False
-                      break
-      results.append(all_fine and ',0' or ',1')
-    else:
-      results.append(',-1')
-    if self._eclipse:
-      (stamp_begin, stamp_end, log_file, (ret_val_ant, stdout_ant, stderr_ant)) = self._eclipse
-      results.append(ret_val_ant and ',1' or ',0')
-    else:
-      results.append(',-1')
-    return ''.join(results)
-
-  def as_txt_regtest(self):
-    result = []
-    for rt, rt_data in self._regtest.iteritems():
-      (stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
-        (ret_val_run, stdout_run, stderr_run))) = rt_data
-      result.append('%s [%s - %s] Regression test results for the `%s\' ' \
-                    'runtime\n\n' % (utils.get_time_diff(False, stamp_begin, stamp_end), \
-                    stamp_begin, stamp_end, rt == 'rt2' and 'function-test' or 'load-test'))
-      if ret_val_make:
-        result.append('Regression test failed to build:\n\n%s\n' \
-                      % ''.join(stdout_make[-20:]))
-      elif ret_val_run:
-        result.append('Regression test failed to run:\n\n%s\n' \
-                      % ''.join(stdout_run[-20:]))
-      else:
-        result.append('Regression test built successfully.\n\n%s\n' \
-                      % ''.join(stdout_run[-20:]))
-    return ''.join(result)
-
-  def as_txt_perftest(self):
-    result = []
-    for rt, rt_data in self._perftest.iteritems():
-      (stamp_begin, stamp_end, perftest_results) = rt_data
-      result.append('%s [%s - %s] Performance test results for the `%s\' ' \
-                    'runtime\n\n' % (utils.get_time_diff(False, stamp_begin, stamp_end), \
-                       stamp_begin, stamp_end, rt == 'rt2' and 'function-test' or 'load-test'))
-      (ret_val_dep, stdout_dep, stderr_dep) = perftest_results['dep']
-      (ret_val_make, stdout_make, stderr_make) = perftest_results['make']
-      run_data = perftest_results['run']
-      if ret_val_dep or ret_val_make:
-        result.append('Performance test failed to build:\n\n%s\n' \
-                      % ''.join(ret_val_dep and stdout_dep[-20:] or stdout_make[-20:]))
-      else:
-        result.append('Performance test compiled successfully.\n\n')
-      for run in run_data:
-        (cps, (ret_val_run, stdout_run, stderr_run)) = run
-        result.append('For `%d\' CPS: ' % cps)
-        if ret_val_run:
-          result.append('Failed\n%s\n\n' % ''.join(stdout_run[-20:]))
-        else:
-          result.append('Succeeded\nExpected Calls/Measured Calls/' \
-                        'Expected CPS/Measured CPS: %s\n' \
-                        % ' '.join(''.join(filter(lambda run_info: \
-                                   'Entities/Time' in run_info, stdout_run)).split()[-5:-1]))
-    return ''.join(result)
-
-  def as_txt_eclipse(self):
-    result = []
-    (stamp_begin, stamp_end, log_file, (ret_val_ant, stdout_ant, stderr_ant)) = self._eclipse
-    result.append('%s [%s - %s] Eclipse build results\n\n'
-                  % (utils.get_time_diff(False, stamp_begin, stamp_end), stamp_begin, stamp_end))
-    f = open(log_file, 'rt')
-    log_file_data = f.readlines()
-    f.close()
-    if ret_val_ant:
-      result.append('Eclipse plug-ins failed to build:\n%s\n\n' \
-                    % ''.join(log_file_data[-20:]))
-    else:
-      result.append('Eclipse plug-ins built successfully.\n\n%s\n' \
-                    % ''.join(log_file_data[-20:]))
-    return ''.join(result)
-
-  def as_txt_functest(self):
-    result = []
-    for rt, rt_data in self._functest.iteritems():
-      (stamp_begin, stamp_end, functest_results) = rt_data
-      result.append('%s [%s - %s] Function test results for the `%s\' runtime\n\n' \
-                    % (utils.get_time_diff(False, stamp_begin, stamp_end), \
-                       stamp_begin, stamp_end, (rt == 'rt2' and 'function-test' or 'load-test')))
-      for function_test, test_results in functest_results.iteritems():
-        (log_file_name, error_file_name) = test_results
-        satester_report = function_test == 'Config_Parser' or function_test == 'Semantic_Analyser'
-        if satester_report:
-          log_file = open(log_file_name, 'rt')
-          log_file_data = log_file.readlines()
-          log_file.close()
-          total_matched = passed = None
-          for line in log_file_data:
-            if not total_matched:
-              total_matched = re.match('^Total number of.*: (\d+)$', line)
-            if not passed:
-              passed = re.match('\s*PASSED.*cases: (\d+)', line)
-            if passed and total_matched:
-              if int(passed.group(1)) == int(total_matched.group(1)):
-                result.append('All `%s\' function tests succeeded.\n' \
-                              % function_test)
-              else:
-                result.append('\n`%s\' function tests failed:\n\n%s\n' \
-                              % (function_test, \
-                                 ''.join(log_file_data[-20:])))
-              break
-        else:
-          if error_file_name and os.path.isfile(error_file_name):
-            error_file = open(error_file_name, 'rt')
-            error_file_data = error_file.readlines()
-            error_file.close()
-            if len(error_file_data) == 0:
-              result.append('All `%s\' function tests succeeded.\n' \
-                            % function_test)
-            else:
-              result.append('\n`%s\' function tests failed:\n\n%s\n' \
-                            % (function_test, \
-                               ''.join(error_file_data[-20:])))
-          else:
-            result.append('All `%s\' function tests succeeded.\n' \
-                          % function_test)
-      result.append('\n')
-    return ''.join(result)
-
-  def as_txt_vobtest(self):
-    result = []
-    header = ('Product/Action', '`compiler -s\'', '`compiler\'', '`make\'', '`make run\'\n')
-    for rt, rt_data in self._vobtest.iteritems():
-      (stamp_begin, stamp_end, vobtest_results) = rt_data
-      result.append('%s [%s - %s] VOB product results for the %s runtime\n\n' \
-                    % (utils.get_time_diff(False, stamp_begin, stamp_end), \
-                       stamp_begin, stamp_end, (rt == 'rt2' and 'function-test' or 'load-test')))
-      for kind, products in vobtest_results.iteritems():
-        if not len(products) > 0:
-          continue
-        title = 'Results for %d `%s\' products using the %s runtime:' \
-                % (len(products), kind, (rt == 'rt2' and 'function-test' \
-                                         or 'load-test'))
-        result.append('%s\n%s\n' % (title, '-' * len(title)))
-        body = []
-        for product in products:
-          for name, name_data in product.iteritems():
-            row = [name]
-            if not isinstance(name_data, types.DictType):
-              row.extend(['Unavailable'] * (len(header) - 1))
-              body.append(row)
-            else:
-              action_order = {'semantic':1, 'translate':2, 'compile':3, 'run':4}
-              row.extend([''] * len(action_order.keys()))
-              for action, action_data in name_data.iteritems():
-                if not action in action_order.keys():
-                  self._logger.error('Unknown action `%s\' while preparing ' \
-                                     'the text output' % action)
-                  continue
-                action_index = action_order[action]
-                if not isinstance(action_data, types.TupleType):
-                  row[action_index] = 'Disabled'
-                else:
-                  (ret_val, output_files, stdout, stderr) = action_data
-                  row[action_index] = '%s' % (ret_val != 0 and '*Failure*' or 'Success')
-              body.append(row)
-        result.append(self.as_txt_table(header, body) + '\n')
-    return ''.join(result)
-
-  def as_txt(self, stamp_begin, stamp_end, config, config_name, slave_name):
-    """ Return the string representation of the test results.
-    """
-    results = []
-    uname_out = utils.run_cmd('uname -srmp')[1]
-    gcc_out = filter(lambda v: v.find(' ver') > 0, utils.run_cmd('%s -v' % (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc'))[2])
-    results.append('Platform: %s\nGCC/LLVM version: %s\n\n' \
-                   % (uname_out[0].strip(), gcc_out[0].strip()))
-    if self._titan:
-      (stamp_begin, stamp_end, \
-       ((ret_val_dep, stdout_dep, stderr_dep), \
-        (ret_val_make, stdout_make, stderr_make), \
-        (ret_val_install, stdout_install, stderr_install))) = self._titan
-      results.append('%s [%s - %s] TITAN build\n\n' \
-                     % (utils.get_time_diff(False, stamp_begin, stamp_end), \
-                        stamp_begin, stamp_end))
-      if ret_val_dep or ret_val_make or ret_val_install:
-        # The `stderr' is always redirected to `stdout'.
-        results.append('TITAN build failed, check the logs for further ' \
-                       'investigation...\n\n%s\n' \
-                       % ''.join(stdout_install[-20:]))
-      else:
-        results.append('TITAN build succeeded.\n\n%s\n' \
-                       % utils.get_license_info('%s/bin/compiler' \
-                                                % self._config.configs[config_name]['installdir']))
-    if self._regtest:
-      results.append(self.as_txt_regtest())
-    if self._perftest:
-      results.append(self.as_txt_perftest())
-    if self._eclipse:
-      results.append(self.as_txt_eclipse())
-    if self._functest:
-      results.append(self.as_txt_functest())
-    if self._vobtest:
-      results.append(self.as_txt_vobtest())
-    return ''.join(results)
-
-  def as_txt_table(self, header = None, body = []):
-    """ Create a table like ASCII composition using the given header and the
-        rows of the table.  The header is an optional string list.  If the
-        header is present and there are more columns in the body the smaller
-        wins.
-
-        Arguments:
-          header: The columns of the table.
-          body: Cell contents.
-
-        Returns:
-          The table as a string.
-    """
-    if len(body) == 0 or len(body) != len([row for row in body \
-                                           if isinstance(row, types.ListType)]):
-      self._logger.error('The second argument of `as_txt_table()\' must be ' \
-                         'a list of lists')
-      return ''
-    num_cols = len(body[0])
-    max_widths = []
-    if header and len(header) < num_cols:
-      num_cols = len(header)
-    for col in range(num_cols):
-      max_width = -1
-      for row in range(len(body)):
-        if max_width < len(body[row][col]):
-          max_width = len(body[row][col])
-      if header and max_width < len(header[col]):
-        max_width = len(header[col])
-      max_widths.append(max_width + 2)  # Two spaces of padding between columns.
-    ret_val = ''  # Start filling the table.
-    if header:
-      ret_val += ''.join([cell.ljust(max_widths[i]) \
-                          for i, cell in enumerate(header[:num_cols])]) + '\n'
-    for row in range(len(body)):
-      ret_val += ''.join([cell.ljust(max_widths[i]) \
-                          for i, cell in enumerate(body[row][:num_cols])]) + '\n'
-    return ret_val
-
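-  # Illustrative use of as_txt_table() (hypothetical data):
-  #   as_txt_table(('Product', 'Result'),
-  #                [['SIPmsg_CNL113319', 'Success'], ['IP_CNL113418', '*Failure*']])
-  # Each column is left-justified to the widest cell in that column plus two
-  # spaces of padding.
-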
-  def as_html_titan(self, config_name, slave_name):
-    """ Return the HTML representation of the TITAN build results as a string.
-    """
-    result = []
-    (stamp_begin, stamp_end, \
-     ((ret_val_dep, stdout_dep, stderr_dep), \
-      (ret_val_make, stdout_make, stderr_make), \
-      (ret_val_install, stdout_install, stderr_install))) = self._titan
-    result.append('<span class="%s">TITAN build</span><br/><br/>\n' \
-                        % ((ret_val_dep or ret_val_make or ret_val_install) \
-                           and 'error_header' or 'header'))
-    result.append('( `<a href="titan.dep">make dep</a>\' )<br/><br/>\n')
-    result.append('( `<a href="titan.make">make</a>\' )<br/><br/>\n')
-    result.append('( `<a href="titan.install">make install</a>\' )' \
-                  '<br/><br/>\n')
-    result.append('<span class="stamp">%s - %s [%s]</span>\n' \
-                  % (stamp_begin, stamp_end, \
-                     utils.get_time_diff(False, stamp_begin, stamp_end)))
-    result.append('<pre>\n')
-    if ret_val_dep or ret_val_make or ret_val_install:
-      result.append('The TITAN build failed, check the logs for further ' \
-                    'investigation...\n\n%s\n' % self.strip_tags(''.join(stdout_install[-20:])))
-    else:
-      result.append('TITAN build succeeded.\n\n%s\n' \
-                    % self.strip_tags(utils.get_license_info('%s/bin/compiler' \
-                                      % self._config.configs[config_name]['installdir'])))
-    result.append('</pre>\n')
-    return ''.join(result)
-
-  def as_html_regtest(self, config_name, slave_name):
-    """ Return the HTML representation of the regression test results as a
-        string.  The last part of the output is always included.
-    """
-    result = []
-    for rt, rt_data in self._regtest.iteritems():
-      (stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
-        (ret_val_run, stdout_run, stderr_run))) = rt_data
-      result.append('<span class="%s">Regression test results for the `%s\' ' \
-                    'runtime</span><br/><br/>\n' \
-                    % (((ret_val_make or ret_val_run) and 'error_header' or 'header'), \
-                       (rt == 'rt2' and 'function-test' or 'load-test')))
-      result.append('( `<a href="regtest-make.%s">make</a>\' )<br/><br/>\n' % rt)
-      result.append('( `<a href="regtest-run.%s">make run</a>\' )<br/><br/>\n' % rt)
-      result.append('<span class="stamp">%s - %s [%s]</span>\n<pre>\n' \
-                    % (stamp_begin, stamp_end, \
-                       utils.get_time_diff(False, stamp_begin, stamp_end)))
-      if ret_val_make:
-        result.append('Regression test failed to build:\n\n%s\n</pre>\n' \
-                      % self.strip_tags(''.join(stdout_make[-20:])))
-      elif ret_val_run:
-        result.append('Regression test failed to run:\n\n%s\n</pre>\n' \
-                      % self.strip_tags(''.join(stdout_run[-20:])))
-      else:
-        result.append('Regression test built successfully.\n\n%s\n</pre>\n' \
-                      % self.strip_tags(''.join(stdout_run[-20:])))
-    return ''.join(result)
-
-  def as_html_perftest(self, config_name, slave_name):
-    """ Return the HTML representation of the performance test results as a
-        string.  Some logic is included.
-    """
-    result = []
-    for rt, rt_data in self._perftest.iteritems():
-      (stamp_begin, stamp_end, perftest_results) = rt_data
-      (ret_val_dep, stdout_dep, stderr_dep) = perftest_results['dep']
-      (ret_val_make, stdout_make, stderr_make) = perftest_results['make']
-      run_data = perftest_results['run']
-      run_failed = False
-      for run in run_data:
-        (cps, (ret_val_run, stdout_run, stderr_run)) = run
-        if ret_val_run:
-          run_failed = True
-          break
-      result.append(
-        '<span class="%s">Performance test results for the `%s\' ' \
-        'runtime</span><br/><br/>\n' \
-        % (((ret_val_dep or ret_val_make or run_failed) \
-            and 'error_header' or 'header'), \
-           (rt == 'rt2' and 'function-test' or 'load-test')))
-      result.append('( `<a href="perftest.%s">make</a>\' )<br/><br/>\n' % rt)
-      result.append('( `<a href=".">make run</a>\' )<br/><br/>')
-      result.append('<span class="stamp">%s - %s [%s]</span>\n' \
-                    % (stamp_begin, stamp_end, \
-                       utils.get_time_diff(False, stamp_begin, stamp_end)))
-      result.append('<pre>\n')
-      if ret_val_dep or ret_val_make:
-        result.append('Performance test failed to build:\n\n%s\n' \
-                      % self.strip_tags(''.join(ret_val_dep and stdout_dep[-20:] or stdout_make[-20:])))
-      else:
-        result.append('Performance test compiled successfully.\n\n')
-        result.append('<embed src="perftest-stats-%s.svg" width="640" height="480" type="image/svg+xml"/>\n\n' % rt)
-        for run in run_data:
-          (cps, (ret_val_run, stdout_run, stderr_run)) = run
-          if ret_val_run:
-            result.append('Failed for `%d\' CPS.\n\n%s\n\n' \
-                          % (cps, self.strip_tags(''.join(stdout_run[-20:]))))
-          else:
-            result.append('Expected Calls/Measured Calls/' \
-                          'Expected CPS/Measured CPS: %s\n' \
-                          % ' '.join(''.join(filter(lambda run_info: \
-                                     'Entities/Time' in run_info, stdout_run)).split()[-5:-1]))
-      result.append('\n</pre>\n')
-    return ''.join(result)
-
-  def as_html_eclipse(self, config_name, slave_name):
-    result = []
-    (stamp_begin, stamp_end, log_file, (ret_val_ant, stdout_ant, stderr_ant)) = self._eclipse
-    result.append('<span class="%s">Eclipse plug-in build results</span><br/><br/>\n' \
-                  % ((ret_val_ant and 'error_header' or 'header')))
-    result.append('( `<a href="eclipse-mylog.log">ant</a>\' )<br/><br/>\n')
-    result.append('<span class="stamp">%s - %s [%s]</span>\n<pre>\n' \
-                  % (stamp_begin, stamp_end, \
-                     utils.get_time_diff(False, stamp_begin, stamp_end)))
-    f = open(log_file, 'rt')
-    log_file_data = f.readlines()
-    f.close()
-    if ret_val_ant:
-      result.append('Eclipse plug-ins failed to build:\n\n%s\n</pre>\n' \
-                    % self.strip_tags(''.join(log_file_data[-20:])))
-    else:
-      result.append('Eclipse plug-ins built successfully.\n\n%s\n</pre>\n' \
-                    % self.strip_tags(''.join(log_file_data[-20:])))
-    return ''.join(result)
-
-  def as_html_functest(self, config_name, slave_name):
-    """ Return the HTML representation of the function test results as a
-        string.  Some logic is included.
-    """
-    result = []
-    for rt, rt_data in self._functest.iteritems():
-      (stamp_begin, stamp_end, functest_results) = rt_data
-      any_failure = False
-      result_tmp = []
-      for function_test, test_results in functest_results.iteritems():
-        (log_file_name, error_file_name) = test_results
-        satester_report = function_test == 'Config_Parser' or function_test == 'Semantic_Analyser'
-        if satester_report:
-          log_file = open(log_file_name, 'rt')
-          log_file_data = log_file.readlines()
-          log_file.close()
-          total_matched = passed = None
-          for line in log_file_data:
-            if not total_matched:
-              total_matched = re.match('^Total number of.*: (\d+)$', line)
-            if not passed:
-              passed = re.match('\s*PASSED.*cases: (\d+)', line)
-            if passed and total_matched:
-              if int(passed.group(1)) == int(total_matched.group(1)):
-                result_tmp.append('All `%s\' function tests succeeded.\n' \
-                                  % function_test)
-              else:
-                result_tmp.append('\n`%s\' function tests failed:\n\n%s\n' \
-                                  % (function_test, \
-                                     self.strip_tags(''.join(log_file_data[-20:]))))
-                any_failure = True
-              break
-        else:
-          if error_file_name and os.path.isfile(error_file_name):
-            error_file = open(error_file_name, 'rt')
-            error_file_data = error_file.readlines()
-            error_file.close()
-            if len(error_file_data) == 0:
-              result_tmp.append('All `%s\' function tests succeeded.\n' \
-                                % function_test)
-            else:
-              result_tmp.append('\n`%s\' function tests failed:\n\n%s\n' \
-                                % (function_test, \
-                                   self.strip_tags(''.join(error_file_data[-20:]))))
-              any_failure = True
-          else:
-            result_tmp.append('All `%s\' function tests succeeded.\n' \
-                              % function_test)
-      result.append('<span class="%s">Function test results for the ' \
-                    '`%s\' runtime</span><br/><br/>\n' \
-                    % ((any_failure and 'error_header' or 'header'), \
-                       (rt == 'rt2' and 'function-test' or 'load-test')))
-      result.append('( `<a href=".">make all</a>\')<br/><br/>\n')
-      result.append('<span class="stamp">%s - %s [%s]</span>\n' \
-                    % (stamp_begin, stamp_end, \
-                       utils.get_time_diff(False, stamp_begin, stamp_end)))
-      result.append('<pre>\n')
-      result.extend(result_tmp)
-      result.append('\n</pre>\n')
-    return ''.join(result)
-
-  def as_html_vobtest(self, config_name, slave_name):
-    """ Return the HTML representation of the VOB product tests as a string.
-        Some logic is included.
-    """
-    result = []
-    header = ('Product/Action', '`compiler -s\'', '`compiler\'', '`make\'', '`make run\'\n')
-    for rt, rt_data in self._vobtest.iteritems():
-      (stamp_begin, stamp_end, vobtest_results) = rt_data
-      any_failure = False
-      result_tmp = []
-      for kind, products in vobtest_results.iteritems():
-        if not len(products) > 0:
-          continue
-        body = []
-        for product in products:
-          for name, name_data in product.iteritems():
-            row = [name]
-            if not isinstance(name_data, types.DictType):
-              row.extend(['Unavailable'] * (len(header) - 1))
-              body.append(row)
-              any_failure = True
-            else:
-              action_order = {'semantic':1, 'translate':2, 'compile':3, 'run':4}
-              row.extend([''] * len(action_order.keys()))
-              for action, action_data in name_data.iteritems():
-                if not action in action_order.keys():
-                  self._logger.error('Unknown action `%s\' while preparing ' \
-                                     'the HTML output' % action)
-                  continue
-                action_index = action_order[action]
-                if not isinstance(action_data, types.TupleType):
-                  row[action_index] = 'Disabled'
-                else:
-                  (ret_val, output_files, stdout, stderr) = action_data
-                  row[action_index] = (ret_val and '*Failure*' or 'Success')
-                  if ret_val:
-                    any_failure = True
-          body.append(row)
-        title = 'Results for %d `%s\' products using the %s runtime:' \
-                % (len(products), kind, (rt == 'rt2' and 'function-test' \
-                                         or 'load-test'))
-        result_tmp.append('%s\n%s\n' % (title, '-' * len(title)))
-        result_tmp.append(self.as_txt_table(header, body) + '\n')
-      result.append('<span class="%s">VOB product results for the %s ' \
-                    'runtime</span><br/><br/>\n' \
-                    % ((any_failure and 'error_header' or 'header'), \
-                       (rt == 'rt2' and 'function-test' or 'load-test')))
-      result.append('( `<a href="products/">make all</a>\' )<br/><br/>\n')
-      result.append('<span class="stamp">%s - %s [%s]</span>\n' \
-                    % (stamp_begin, stamp_end, \
-                       utils.get_time_diff(False, stamp_begin, stamp_end)))
-      result.append('<pre>\n')
-      result.extend(result_tmp)
-      result.append('</pre>\n')
-    return ''.join(result)
-
-  def as_html(self, stamp_old, stamp_new, config, config_name, slave_name):
-    """ Return the HTML representation of all test results of the given slave
-        as a string.
-    """
-    result = [
-      '<?xml version="1.0" encoding="ISO8859-1"?>\n' \
-      '<html>\n' \
-      '<head>\n' \
-      '<meta http-equiv="content-type" content="text/html; charset=ISO8859-1"/>\n' \
-      '<link rel="stylesheet" type="text/css" href="../../index.css"/>\n' \
-      '<title>Shouldn\'t matter...</title>\n' \
-      '</head>\n' \
-      '<body>\n'
-    ]
-    uname_out = utils.run_cmd('uname -srmp')[1]
-    gcc_out = filter(lambda v: v.find(' ver') > 0, utils.run_cmd('%s -v' % (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc'))[2])
-    result.append('<pre>\nPlatform: %s\nGCC/LLVM version: %s</pre>\n\n' \
-                  % (uname_out[0].strip(), gcc_out[0].strip()))
-    if self._titan:
-      result.append(self.as_html_titan(config_name, slave_name))
-    if self._regtest:
-      result.append(self.as_html_regtest(config_name, slave_name))
-    if self._perftest:
-      result.append(self.as_html_perftest(config_name, slave_name))
-    if self._eclipse:
-      result.append(self.as_html_eclipse(config_name, slave_name))
-    if self._functest:
-      result.append(self.as_html_functest(config_name, slave_name))
-    if self._vobtest:
-      result.append(self.as_html_vobtest(config_name, slave_name))
-    result += [
-      '</body>\n' \
-      '</html>\n'
-    ]
-    return ''.join(result)
-
-  def publish_csv2email(self, build_start, build_end, email_file, \
-                        slave_list, build_root, configs, reset):
-    """ Assemble a compact e-mail message from the CSV data provided by each
-        slave in the current build.  The assembled e-mail message is written
-        to a file.  It's ready to send.  It's called by the master.
-
-        Arguments:
-          build_start: Start of the whole build for all slaves.
-          build_end: End of the whole build for all slaves.
-          email_file: Store the e-mail message here.
-          slave_list: Slaves processed.
-          build_root: The actual build directory.
-          configs: All configurations.
-          reset: Reset statistics.
-    """
-    email_header = 'Full build time:\n----------------\n\n%s <-> %s\n\n' \
-                   % (build_start, build_end)
-    email_footer = 'For more detailed results, please visit:\n' \
-                   'http://ttcn.ericsson.se/titan-testresults/titan_builds or\n' \
-                   'http://ttcn.ericsson.se/titan-testresults/titan_builds/%s.\n\n' \
-                   'You\'re receiving this e-mail, because you\'re ' \
-                   'subscribed to daily TITAN build\nresults.  If you want ' \
-                   'to unsubscribe, please reply to this e-mail.  If you\n' \
-                   'received this e-mail by accident please report that ' \
-                   'too.  Thank you.\n' % build_root
-    email_matrix = 'The result matrix:\n------------------\n\n'
-    header = ('Slave/Action', 'TITAN build', 'Reg. tests', 'Perf. tests', \
-              'Func. tests', 'VOB tests', 'Eclipse build')  # It's long without abbrevs.
-    rows = []
-    slave_names = []
-    stat_handler = None
-    for slave in slave_list:
-      (slave_name, config_name, is_localhost) = slave
-      slave_names.append(config_name)
-      csv_file_name = '%s/%s/report.csv' \
-                      % (self._config.common['logdir'], config_name)
-      if 'measure' in configs[config_name] and configs[config_name]['measure']:
-        stat_handler = StatHandler(self._logger, self._config.common, configs, slave_list, reset) 
-      if not os.path.isfile(csv_file_name):
-        self._logger.error('It seems that we\'ve lost `%s\' for configuration `%s\'' % (slave_name, config_name))
-        local_row = [slave_name]
-        local_row.extend(['Lost'] * (len(header) - 1))
-        rows.append(local_row)
-        if stat_handler:
-          stat_handler.lost(config_name)
-        continue
-      csv_file = open(csv_file_name, 'rt')
-      csv_data = csv_file.readlines()
-      csv_file.close()
-      if len(csv_data) != 1:
-        self._logger.error('Error while processing `%s/%s/report.csv\' at ' \
-                           'the end, skipping slave' \
-                           % (self._config.common['logdir'], config_name))
-      else:
-        csv_data = csv_data[0].split(',')
-        local_row = [csv_data[4]]  # Should be `config_name'.
-        if stat_handler:
-          stat_handler.disabled_success_failure(config_name, csv_data[6:])
-        for result in csv_data[6:]:
-          if int(result) == -1:
-            local_row.append('Disabled')
-          elif int(result) == 0:
-            local_row.append('Success')
-          elif int(result) == 1:
-            local_row.append('*Failure*')
-        rows.append(local_row)
-    email_matrix += '%s\n' % self.as_txt_table(header, rows)
-    file = open(email_file, 'wt')
-    file.write(email_header)
-    if stat_handler:
-      file.write(str(stat_handler))
-    file.write(email_matrix)
-    file.write(email_footer)
-    file.close()
-
-  def backup_logs(self):
-    """ Handle archiving and backup activities.
-
-        Returns:
-          A dictionary with None values.
-    """
-    archived_builds = {}
-    for file in os.listdir(self._config.common['htmldir']):
-      if os.path.isdir('%s/%s' % (self._config.common['htmldir'], file)):
-        matched_dir = re.search('(\d{8}_\d{6})', file)
-        if not matched_dir:
-          continue
-        diff_in_days = utils.diff_in_days(matched_dir.group(1), utils.get_time(True))
-        if diff_in_days > self._config.common['archive']:
-          self._logger.debug('Archiving logs for build `%s\'' % matched_dir.group(1))
-          utils.run_cmd('cd %s && tar cf %s.tar %s' \
-                        % (self._config.common['htmldir'], \
-                           matched_dir.group(1), matched_dir.group(1)), None, 1800)
-          utils.run_cmd('bzip2 %s/%s.tar && rm -rf %s/%s' \
-                        % (self._config.common['htmldir'], matched_dir.group(1), \
-                           self._config.common['htmldir'], matched_dir.group(1)), None, 1800)
-          archived_builds[matched_dir.group(1)] = None
-      else:
-        matched_archive = re.search('(\d{8}_\d{6}).tar.bz2', file)
-        if not matched_archive:
-          continue
-        diff_in_days = utils.diff_in_days(matched_archive.group(1), utils.get_time(True))
-        if 'cleanup' in self._config.common and 'cleanupslave' in self._config.common and \
-           diff_in_days > self._config.common['cleanup']:
-          slave_name = self._config.common['cleanupslave']['slave']
-          if slave_name in self._config.slaves:
-            slave = self._config.slaves[slave_name]
-            slave_url = '%s@%s' % (slave['user'], slave['ip'])
-            utils.run_cmd('ssh %s \'mkdir -p %s\'' \
-                          % (slave_url, self._config.common['cleanupslave']['dir']))
-            (ret_val_scp, stdout_scp, stderr_scp) = \
-              utils.run_cmd('scp %s/%s %s:%s' \
-                            % (self._config.common['htmldir'], file, slave_url, \
-                               self._config.common['cleanupslave']['dir']))
-            if not ret_val_scp:
-              utils.run_cmd('rm -f %s/%s' % (self._config.common['htmldir'], file))
-              continue
-          else:
-            self._logger.error('Slave with name `%s\' cannot be found in ' \
-                               'the slaves\' list' % slave_name)
-        archived_builds[matched_archive.group(1)] = None
-    return archived_builds
-
-  def strip_tags(self, text):
-    """ Replace all '<', '>' etc. characters with their HTML equivalents.  """
-    return text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
-
-  def publish_html(self, build_root):
-    """ Create basic HTML output from the published directory structure.  It
-        should be regenerated after every build.  The .css file is generated
-        from here as well.  No external files used.  It is responsible for
-        publishing in general.
-
-        Arguments:
-          build_root: The actual build directory.
-    """
-    self.generate_css()
-    html_index = os.path.join(self._config.common['htmldir'], 'index.html')
-    html_menu = os.path.join(self._config.common['htmldir'], 'menu.html')
-    index_file = open(html_index, 'wt')
-    index_file.write(
-      '<?xml version="1.0" encoding="ISO8859-1"?>\n' \
-      '<html>\n' \
-      '<head>\n' \
-      '<meta http-equiv="content-type" content="text/html; charset=ISO8859-1"/>\n' \
-      '<link rel="stylesheet" type="text/css" href="index.css"/>\n' \
-      '<title>Build results (Updated: %s)</title>\n' \
-      '</head>\n' \
-      '<frameset cols="285,*">\n' \
-      '<frame src="menu.html" name="menu"/>\n' \
-      '<frame src="%s/report.txt" name="contents"/>\n' \
-      '</frameset>\n' \
-      '</html>\n' % (build_root, build_root))
-    index_file.close()
-    menu_file = open(html_menu, 'wt')
-    menu_contents_dict = self.backup_logs()
-    for root, dirs, files in os.walk(self._config.common['htmldir']):
-      build_match = re.match('(\d{8}_\d{6})', root.split('/')[-1])
-      if build_match:
-        dirs.sort()
-        dirs_list = ['<li><a href="%s/%s/report.html" target="contents">%s' \
-                     '</a></li>\n' % (build_match.group(1), elem, elem) for elem in dirs]
-        menu_contents_dict[build_match.group(1)] = dirs_list
-    sorted_keys = menu_contents_dict.keys()
-    sorted_keys.sort(reverse = True)
-    menu_contents = ''
-    bg_toggler = False
-    for build in sorted_keys:
-      build_data = menu_contents_dict[build]
-      if build_data:
-        menu_contents += \
-          '<tr>\n' \
-          '<td bgcolor="%s">\nBuild #: <b>' \
-          '<a href="%s/report.txt" target="contents">%s</a></b>\n' \
-          '<ul>\n%s</ul>\n' \
-          '</td>\n' \
-          '</tr>\n' % ((bg_toggler and '#a9c9e1' or '#ffffff'), build, \
-                       build, ''.join(build_data))
-        bg_toggler = not bg_toggler
-      else:
-        menu_contents += \
-          '<tr>\n' \
-          '<td bgcolor="#c1c1ba">\nBuild #: <b>' \
-          '<a href="%s.tar.bz2" target="contents">%s</a> (A)</b>\n' \
-          '</td>\n' \
-          '</tr>\n' % (build, build)
-    menu_file.write(
-      '<?xml version="1.0" encoding="ISO8859-1"?>\n' \
-      '<html>\n' \
-      '<head>\n' \
-      '<meta http-equiv="content-type" content="text/html; charset=ISO8859-1"/>\n' \
-      '<link rel="stylesheet" type="text/css" href="index.css"/>' \
-      '<title>Shouldn\'t matter...</title>\n' \
-      '</head>\n' \
-      '<body>\n<pre>\n' \
-      '      _\n'
-      ' ____( )___________\n'
-      '/_  _/ /_  _/  \   \\\n'
-      ' /_//_/ /_//_/\_\_\_\\\n'
-      '</pre>\n'
-      '<table class="Menu">\n' \
-      '%s\n' \
-      '</table>\n' \
-      '</body>\n' \
-      '</html>\n' % menu_contents)
-    menu_file.close()
-    self._plotter.collect_data()
-    self._plotter.plot(build_root)
-
-  def generate_css(self):
-    css_file = file('%s/index.css' % self._config.common['htmldir'], 'wt')
-    css_file.write(
-      'body, td {\n' \
-      '  font-family: Verdana, Cursor;\n' \
-      '  font-size: 10px;\n' \
-      '  font-weight: bold;\n' \
-      '}\n\n' \
-      'table {\n' \
-      '  border-spacing: 1px 1px;\n' \
-      '}\n\n' \
-      'table td {\n' \
-      '  padding: 8px 4px 8px 4px;\n' \
-      '}\n\n' \
-      'table.Menu td {\n' \
-      '  border: 1px gray solid;\n' \
-      '  text-align: left;\n' \
-      '  width: 160px;\n' \
-      '}\n\n' \
-      'pre {\n' \
-      '  font-size: 11px;\n' \
-      '  font-weight: normal;\n' \
-      '}\n\n'
-      'a:link,a:visited,a:active {\n' \
-      '  color: #00f;\n' \
-      '}\n\n'
-      'a:hover {\n' \
-      '  color: #444;\n' \
-      '}\n\n' \
-      '.error_header {\n' \
-      '  font-weight: bold;\n' \
-      '  font-size: 18px;\n' \
-      '  color: #f00;\n' \
-      '}\n\n' \
-      '.header {\n' \
-      '  font-weight: bold;\n' \
-      '  font-size: 18px;\n' \
-      '  color: #000;\n' \
-      '}\n\n' \
-      '.stamp {\n' \
-      '  font-size: 11px;\n' \
-      '}\n'
-    )
-    css_file.close()
-
-class plotter:
-  def __init__(self, logger, config):
-    self._logger = logger
-    self._config = config
-    self._htmldir = self._config.common.get('htmldir', '')
-    
-    self._stats = {}
-    
-  def collect_data(self):
-    self._logger.debug('Collecting statistical data for plotting to `%s\'' % self._htmldir)
-    dirs_to_check = [dir for dir in os.listdir(self._htmldir) \
-      if os.path.isdir(os.path.join(self._htmldir, dir)) \
-        and re.match('(\d{8}_\d{6})', dir)]
-    dirs_to_check.sort()
-    for dir in dirs_to_check:
-      date = '%s-%s-%s' % (dir[0:4], dir[4:6], dir[6:8])
-      date_dir = os.path.join(self._htmldir, dir)
-      platforms = [platform for platform in os.listdir(date_dir) \
-        if os.path.isdir(os.path.join(date_dir, platform))]
-      for platform in platforms:
-        platform_dir = os.path.join(date_dir, platform)
-        files = os.listdir(platform_dir)
-        files.sort()
-        stat_files = [file for file in files if 'perftest-stats' in file and file.endswith('csv')]
-        if len(stat_files) > 0 and len(stat_files) <= 2:
-          for file in stat_files:
-            rt = 'rt2' in file and 'rt2' or 'rt1'
-            if not rt in self._stats:
-              self._stats[rt] = {}
-            if not platform in self._stats[rt]:
-              self._stats[rt][platform] = []
-            file = open(os.path.join(platform_dir, file), 'rt')
-            for line in file:
-              dates_in = [d[0] for d in self._stats[rt][platform]] 
-              if not line.split(',')[0] in dates_in:
-                self._stats[rt][platform].append(line.split(','))
-            file.close()
-        else:
-          data_rt1 = [date]
-          data_rt2 = [date]
-          for file in files:
-            rt = 'rt2' in file and 'rt2' or 'rt1'
-            if not rt in self._stats:
-              self._stats[rt] = {}
-            if not platform in self._stats[rt]:
-              self._stats[rt][platform] = []
-            if re.match('perftest\.rt\d{1}\-\d+', file):
-              file = open(os.path.join(platform_dir, file), 'rt')
-              for line in file:
-                if re.search('=>>>Entities/Time', line):
-                  if rt == 'rt1':
-                    data_rt1.extend(line.split()[-5:-1])
-                  else:
-                    data_rt2.extend(line.split()[-5:-1])
-                  break
-              file.close()
-          if len(data_rt1) > 1:
-            dates_in = [d[0] for d in self._stats['rt1'][platform]]
-            if not data_rt1[0] in dates_in:
-              self._stats['rt1'][platform].append(data_rt1)
-          if len(data_rt2) > 1:
-            dates_in = [d[0] for d in self._stats['rt2'][platform]]
-            if not data_rt2[0] in dates_in:
-              self._stats['rt2'][platform].append(data_rt2)
-   
-  def plot(self, build_dir):
-    self._logger.debug('Plotting collected statistical data')
-    for runtime, runtime_data in self._stats.iteritems():
-      for config_name, config_data in runtime_data.iteritems():
-        target_dir = os.path.join(os.path.join(self._htmldir, build_dir), config_name)
-        if len(config_data) < 1 or not os.path.isdir(target_dir):
-          continue     
-        csv_file_name = os.path.join(target_dir, 'perftest-stats-%s.csv-tmp' % runtime)
-        cfg_file_name = os.path.join(target_dir, 'perftest-stats-%s.cfg' % runtime)
-        csv_file = open(csv_file_name, 'wt')
-        cfg_file = open(cfg_file_name, 'wt')
-        youngest = config_data[0][0]
-        oldest = config_data[0][0]
-        for line in config_data:
-          if line[0] < oldest:
-            oldest = line[0]
-          if line[0] > youngest:
-            youngest = line[0]
-          csv_file.write('%s\n' % ','.join(line).strip())
-        csv_file.close()
-        # `gnuplot' requires it to be sorted...
-        utils.run_cmd('cat %s | sort >%s' % (csv_file_name, csv_file_name[0:-4]))
-        utils.run_cmd('rm -f %s' % csv_file_name)
-        csv_file_name = csv_file_name[0:-4]
-        config = self._config.configs.get(config_name, {})
-        cps_min = config.get('cpsmin', 1000)
-        cps_max = config.get('cpsmax', 2000)
-        cps_diff = abs(cps_max - cps_min) / 5
-        cfg_file.write( \
-          'set title "TITANSim CPS Statistics with LGenBase\\n(%d-%d CPS on \\`%s\\\')"\n' \
-          'set datafile separator ","\n' \
-          'set xlabel "Date"\n' \
-          'set xdata time\n' \
-          'set timefmt "%%Y-%%m-%%d"\n' \
-          'set xrange ["%s":"%s"]\n' \
-          'set format x "%%b %%d\\n%%Y"\n' \
-          'set ylabel "CPS"\n' \
-          'set terminal svg size 640, 480\n' \
-          'set grid\n' \
-          'set key right bottom\n' \
-          'set key spacing 1\n' \
-          'set key box\n' \
-          'set output "%s/perftest-stats-%s.svg"\n' \
-          'plot "%s" using 1:5 title "%d CPS" with linespoints, \\\n' \
-          '"%s" using 1:9 title "%d CPS" with linespoints, \\\n' \
-          '"%s" using 1:13 title "%d CPS" with linespoints, \\\n' \
-          '"%s" using 1:17 title "%d CPS" with linespoints, \\\n' \
-          '"%s" using 1:21 title "%d CPS" with linespoints, \\\n' \
-          '"%s" using 1:25 title "%d CPS" with linespoints\n' \
-          % (cps_min, cps_max, config_name, oldest, youngest, target_dir,
-             runtime, csv_file_name, cps_min, csv_file_name,
-             cps_min + cps_diff, csv_file_name, cps_min + 2 * cps_diff,
-             csv_file_name, cps_min + 3 * cps_diff, csv_file_name,
-             cps_min + 4 * cps_diff, csv_file_name, cps_max))
-        cfg_file.close()
-        utils.run_cmd('gnuplot %s' % cfg_file_name)
-
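As a reading aid for the column indices in the gnuplot script above: judging
from how `collect_data' assembles each CSV row, column 1 is the date and every
CPS level contributes four values (expected calls, measured calls, expected
CPS, measured CPS), so the `using 1:5', `1:9', ... clauses appear to pick the
measured-CPS column of each level.  A hypothetical helper, for illustration:

    def measured_cps_column(level):
        # 1-based gnuplot column of the measured CPS for the (0-based) CPS level.
        return 1 + 4 * level + 4

    print [measured_cps_column(level) for level in range(6)]  # [5, 9, 13, 17, 21, 25]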
-class StatHandler:
-  """ The implementation of this class is based on the format of `result.txt'.
-  """
-  def __init__(self, logger, common_configs, configs, slave_list, reset):
-    self._logger = logger
-    self._configs = configs
-    self._common_configs = common_configs
-    self._html_root = self._common_configs.get('htmldir') 
-    self._configs_to_support = []
-    self._first_period_started = None
-    self._period_started = None
-    self._overall_score = 0
-    self._overall_score_all = 0
-    self._period_score = 0
-    self._period_score_all = 0
-    for slave in slave_list:  # Prepare list of active configurations.
-      (slave_name, config_name, is_localhost) = slave
-      if not self.is_weekend_or_holiday() and config_name in self._configs and 'measure' in self._configs[config_name] and self._configs[config_name]['measure']: 
-        self._configs_to_support.append(config_name)
-    # Scan and parse the latest `report.txt' file.
-    dirs_to_check = [dir for dir in os.listdir(self._html_root) if os.path.isdir(os.path.join(self._html_root, dir)) and re.match('(\d{8}_\d{6})', dir)]
-    dirs_to_check.sort()
-    dirs_to_check.reverse()
-    for dir in dirs_to_check:
-      report_txt_path = os.path.join(self._html_root, os.path.join(dir, 'report.txt'))
-      if os.path.isfile(report_txt_path):
-        report_txt = open(report_txt_path, 'rt')
-        for line in report_txt:
-          first_period_line_matched = re.search('^First period.*(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*', line)
-          overall_score_line_matched = re.search('^Overall score.*(\d+)/(\d+).*', line)
-          period_started_line_matched = re.search('^This period.*(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*', line)
-          period_score_line_matched = re.search('^Period score.*(\d+)/(\d+).*', line)
-          if first_period_line_matched:
-            self._first_period_started = first_period_line_matched.group(1)
-          elif overall_score_line_matched:
-            self._overall_score = int(overall_score_line_matched.group(1))
-            self._overall_score_all = int(overall_score_line_matched.group(2))
-          elif period_started_line_matched:
-            self._period_started = period_started_line_matched.group(1)
-          elif period_score_line_matched:
-            self._period_score = int(period_score_line_matched.group(1))
-            self._period_score_all = int(period_score_line_matched.group(2))
-        report_txt.close()
-        if self._first_period_started is None or self._period_started is None \
-          or self._overall_score is None or self._overall_score_all is None \
-          or self._period_score is None or self._period_score_all is None:
-          self._logger.debug('Something is wrong with the report file `%s\'' \
-                             % report_txt_path)
-          continue
-        self._logger.debug('Using report file `%s\'' % report_txt_path)
-        break
-    if not self.is_weekend_or_holiday():
-      self._overall_score_all += (2 * len(self._configs_to_support))
-      self._period_score_all += (2 * len(self._configs_to_support))
-    if not self._first_period_started:
-      self._first_period_started = utils.get_time()
-    if not self._period_started:
-      self._period_started = utils.get_time()
-    if reset or int(utils.get_time_diff(False, self._period_started, utils.get_time(), True)[0]) / 24 >= self._common_configs.get('measureperiod', 30):
-      self._period_started = utils.get_time()
-      self._period_score = self._period_score_all = 0      
-      
-  def is_weekend_or_holiday(self):
-    """ Weekends or any special holidays to ignore.  """
-    ignore = int(time.strftime('%w')) == 0 or int(time.strftime('%w')) == 6
-    if not ignore:
-      holidays = ((1, 1), (3, 15), (5, 1), (8, 20), (10, 23), (11, 1), (12, 25), (12, 26))
-      month = int(time.strftime('%m'))
-      day = int(time.strftime('%d'))
-      for holiday in holidays:
-        if (month, day) == holiday:
-          ignore = True
-          break
-    return ignore
-
-  def lost(self, config_name):
-    if not config_name in self._configs_to_support:
-      return
-    self._overall_score += 1
-    self._period_score += 1
-  
-  def disabled_success_failure(self, config_name, results):
-    """ `results' is coming from the CSV file.  """
-    if not config_name in self._configs_to_support:
-      return
-    titan = int(results[0])
-    regtest = int(results[1])
-    perftest = int(results[2])  # Not counted.
-    functest = int(results[3])
-    # Nothing to do here, apart from emitting a warning.
-    if titan == -1 or regtest == -1 or functest == -1:
-      self._logger.warning('Mandatory tests were disabled for build '
-                           'configuration `%s\', the generated statistics ' \
-                           'may be false, check it out' % config_name)
-    if titan == 0 and regtest == 0 and functest == 0:
-      self._overall_score += 2
-      self._period_score += 2 
-
-  def percent(self, score, score_all):
-    try:
-      ret_val = (float(score) / float(score_all)) * 100.0
-    except:
-      return 0.0
-    return ret_val
-  
-  def buzzword(self, percent):
-    if percent > 80.0: return 'Stretched'
-    elif percent > 70.0: return 'Commitment'
-    elif percent > 60.0: return 'Robust'
-    else: return 'Unimaginable'
-    
-  def __str__(self):
-    if len(self._configs_to_support) == 0:
-      return ''
-    overall_percent = self.percent(self._overall_score, self._overall_score_all)
-    period_percent = self.percent(self._period_score, self._period_score_all)
-    ret_val = 'Statistics:\n-----------\n\n' \
-      'Configurations: %s\n' \
-      'First period: %s\n' \
-      'Overall score: %d/%d (%.2f%%) %s\n' \
-      'This period: %s\n' \
-      'Period score: %d/%d (%.2f%%) %s\n\n' \
-      % (', '.join(self._configs_to_support), self._first_period_started, self._overall_score, self._overall_score_all,
-         overall_percent, self.buzzword(overall_percent), self._period_started,
-         self._period_score, self._period_score_all, period_percent, self.buzzword(period_percent))
-    return ret_val
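A small worked example of the scoring above (the number of measured
configurations is invented for illustration):

    # Hypothetical weekday with two configurations that have `measure' enabled:
    # each adds 2 attainable points per build.  One build is fully green
    # (titan, regtest and functest all 0 -> +2), the other slave's report is
    # lost (-> +1 via lost()).
    score_all = 2 * 2
    score = 2 + 1
    percent = (float(score) / float(score_all)) * 100.0
    print percent  # 75.0, which buzzword() maps to 'Commitment' (> 70, <= 80)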
diff --git a/etc/autotest/web/titan_builder.css b/etc/autotest/web/titan_builder.css
deleted file mode 100644
index bdda6230f..000000000
--- a/etc/autotest/web/titan_builder.css
+++ /dev/null
@@ -1,46 +0,0 @@
-body {
-  margin-bottom:50px;
-}
-
-body, td {
-  font-family: Verdana, Cursor;
-  font-size: 12px;
-  font-weight: normal;
-}
-
-a:link,a:visited,a:active {
-  color: #444;
-}
-
-a:hover {
-  color: #000000;
-}
-
-table {
-  border-spacing: 1px 1px;
-}
-
-table td {
-  padding: 3px 0px 3px 0px;
-  text-align: left;
-}
-
-.history {
-  padding: 3px 0px 3px 0px;
-  text-align: left;
-  background-color: #eaeaea;
-}
-
-input textline {
-  font-family: Verdana, Cursor;
-  font-weight: normal;
-  font-size: 10px;
-}
-
-input button {
-  background-color: #447799;
-  font-family: Verdana, Cursor;
-  font-weight: bold;
-  font-size: 10px;
-  color: white;
-}
diff --git a/etc/autotest/web/titan_builder.php b/etc/autotest/web/titan_builder.php
deleted file mode 100644
index 5f83317f6..000000000
--- a/etc/autotest/web/titan_builder.php
+++ /dev/null
@@ -1,228 +0,0 @@
-<html>
-<head>
-<link href="titan_builder.css" rel="stylesheet" type="text/css" />
-<title>
-Web interface of the automatic Titan test builder...
-</title>
-<script>
-function validate_selections()
-{
-  var platforms_selection = document.getElementById('platforms[]');
-  var recipients_selection = document.getElementById('selected_recipients[]');
-  var platforms = false;
-  var recipients = false;
-  for (var i = 0; i < platforms_selection.options.length; i++) {
-    if (platforms_selection.options[i].selected && platforms_selection.options[i].text.length > 0) {
-      platforms = true;
-      break;
-    }
-  }
-  if (recipients_selection.options.length > 0 && recipients_selection.options[0].text.length > 0) {
-    for (var i = 0; i < recipients_selection.options.length; i++) {
-      recipients_selection.options[i].selected = true;
-    }
-    recipients = true;
-  }
-  if (!platforms && !recipients) {
-    alert('At least one platform and one recipient needs to be selected.');
-  } else if (!platforms) {
-    alert('At least one platform needs to be selected.');
-  } else if (!recipients) {
-    alert('At least one recipient needs to be selected.');
-  }
-  return platforms && recipients;
-}
-function addtolist(sourceID, targetID)
-{
-  source = document.getElementById(sourceID);
-  target = document.getElementById(targetID);
-  numberOfItems = source.options.length;
-  insertPt = target.options.length;
-  if (target.options[0].text === "") { insertPt = 0; }
-  for (i = 0; i < numberOfItems; i++) {
-    if (source.options[i].selected === true) {
-      msg = source.options[i].text;
-      for (j = 0; j < target.options.length; j++) {
-        if (msg === target.options[j].text) {
-          j = -1;
-          break;
-        }
-      }
-      if (j > 0) {
-        target.options[insertPt] = new Option(msg);
-        insertPt = target.options.length;
-      }
-    }
-  }
-}
-function takefromlist(targetID)
-{
-  target = document.getElementById(targetID);
-  if (target.options.length < 0) { return; }
-  for (var i = target.options.length - 1; i >= 0; i--) {
-    if (target.options[i].selected) {
-      target.options[i] = null;
-      if (target.options.length === 0) { target.options[0] = new Option(""); }
-    }
-  }
-}
-</script>
-</head>
-<body>
-<h1>Welcome to the web interface of the automatic Titan test builder...</h1>
-<form method="POST" action="<?php echo $_SERVER['PHP_SELF']; ?>" onSubmit="return validate_selections()">
-<input type="hidden" name="_submit_check" value="1" />
-<?php
-define("HISTORY_FILE", "./titan_builder_history.txt");
-
-function get_platforms_selection($platforms)
-{
-  $result =
-    "<table>\n" .
-    "<tr>\n" .
-    "<td>Select platform(s):</td>\n" .
-    "</tr>\n" .
-    "<tr>\n" .
-    "<td><select name=\"platforms[]\" id= \"platforms[]\" multiple size=\"" . count($platforms) . "\">\n";
-  for ($i = 0; $i < count($platforms); $i++) {
-    $platform = split(" ", $platforms[$i]);
-    $result .= "<option value=\"" . $platform[0] . "\">" . $platform[0] . "</option>\n";
-  }
-  $result .=
-    "</select></td>\n" .
-    "</tr>\n" .
-    "</table>\n";
-  return $result;
-}
-
-function get_recipients_selection($recipients)
-{
-  $result =
-    "<table border=\"0\">\n" .
-    "<tr>\n" .
-    "<td align=\"left\">Select recipient(s):</td>\n" .
-    "</tr>\n" .
-    "<tr>\n" .
-    "<th><select name=\"all_recipients[]\" id=\"all_recipients[]\" size=\"" . count($recipients) . "\" multiple=\"multiple\" style=\"width:320;height:180\"" .
-    "ondblclick=\"addtolist('all_recipients[]', 'selected_recipients[]');\">\n";
-  for ($i = 0; $i < count($recipients); $i++) {
-    $result .= "<option>" . htmlspecialchars($recipients[$i]) . "</option>\n";
-  }
-  $result .=
-    "</select>&nbsp;</th>\n" .
-    "<th style=\"vertical-align:middle\">\n" .
-    "<input type=\"button\" onclick=\"addtolist('all_recipients[]', 'selected_recipients[]');\" value=\"--&gt;\" /><br />\n" .
-    "<input type=\"button\" style=\"button\" onclick=\"takefromlist('selected_recipients[]');\" value=\"&lt;--\" />\n" .
-    "</th>\n" .
-    "<th>&nbsp;\n" .
-    "<select name=\"selected_recipients[]\" id=\"selected_recipients[]\" size=\"" . count($recipients) . "\" multiple=\"multiple\" style=\"width:320;height:180\"" .
-    "ondblclick=\"takefromlist('selected_recipients[]');\">\n" .
-    "<option></option>\n" .
-    "</select>\n" .
-    "</th>\n" .
-    "</tr>\n" .
-    "</table>\n";
-  return $result;
-}
-
-function get_tests_selection()
-{
-  $result =
-    "<table border=\"0\">\n" .
-    "<tr>\n" .
-    "<td align=\"left\">Select test(s) to run:</td>\n" .
-    "</tr>\n" .
-    "<tr>\n" .
-    "<td><input type=\"checkbox\" disabled=\"disabled\" checked=\"checked\" /> Build</td>\n" .
-    "</tr>\n" .
-    "<tr>\n" .
-    "<td><input type=\"checkbox\" name=\"regtests\" value=\"regtests\" /> Regression tests</td>\n" .
-    "</tr>\n" .
-    "<tr>\n" .
-    "<td><input type=\"checkbox\" name=\"functests\" value=\"functests\" /> Function tests</td>\n" .
-    "</tr>\n" .
-    "<tr>\n" .
-    "<td><input type=\"checkbox\" name=\"perftests\" value=\"perftests\" /> Performance tests</td>\n" .
-    "</tr>\n" .
-    "</table>\n" .
-    "<p><input type=\"submit\" name=\"press_and_pray\" value=\"Press & Pray!\"></p>\n";
-  return $result;
-}
-
-function get_history()
-{
-  if (!file_exists(HISTORY_FILE)) {
-    return "";
-  }
-  $file = fopen(HISTORY_FILE, "r");
-  $result = "<br />\n";
-  while (!feof($file)) {
-    $result .= (htmlspecialchars(fgets($file)) . "<br />\n");
-  }
-  fclose($file);
-  return $result;
-}
-
-function get_scripts()
-{
-  $command = "ssh titanrt@esekits1064 \"bash -c 'cd /home/titanrt/titan_nightly_builds && rm -rf *.{py,pyc,sh} TTCNv3/etc/autotest ;";
-  $command .= " . /home/titanrt/.titan_builder ; cvs co TTCNv3/etc/autotest ; ln -sf TTCNv3/etc/autotest/*.{py,sh} .'\"";
-  pclose(popen($command, "r"));
-  echo "<p>" . htmlspecialchars($command) . "</p>";
-}
-
-function start_the_build($platform_strings, $recipient_strings, $test_strings)
-{
-  $command = "ssh titanrt@esekits1064 \"bash -c 'cd /home/titanrt/titan_nightly_builds && . /home/titanrt/.titan_builder ;";
-  $command .= " ./titan_builder.sh -c " . $platform_strings . " -A \"'\"'\"" . $recipient_strings . "\"'\"'\" " . (strlen($test_strings) > 0 ? "-t " . $test_strings : "") . "'\" &";
-  pclose(popen($command, "r"));
-  echo "<p>" . htmlspecialchars($command) . "</p>";
-}
-
-exec('/home/titanrt/titan_nightly_builds/titan_builder.py -d', $output_platforms);
-exec('/home/titanrt/titan_nightly_builds/titan_builder.py -a', $output_recipients);
-if (array_key_exists("_submit_check", $_POST)) {
-  echo "Notification will be sent to the following recipients: "
-    . htmlspecialchars(implode(", ", $_POST["selected_recipients"])) . "<br /><br />\n";
-  echo "The build has started on the following platforms:<br /><br />\n";
-  for ($i = 0; $i < count($_POST["platforms"]); $i++) {
-    echo $i . ". " . $_POST["platforms"][$i] . "<br />\n";
-  }
-  $platform_strings = implode(",", $_POST["platforms"]);
-  $recipient_strings = implode(",", $_POST["selected_recipients"]);
-  $test_strings = ((isset($_POST["regtests"]) && $_POST["regtests"] == "regtests") ? "r" : "")
-    . ((isset($_POST["functests"]) && $_POST["functests"] == "functests") ? "f" : "")
-    . ((isset($_POST["perftests"]) && $_POST["perftests"] == "perftests") ? "p" : "");
-  start_the_build($platform_strings, $recipient_strings, $test_strings);
-  $contents = file_get_contents(HISTORY_FILE);
-  file_put_contents(HISTORY_FILE, date("Y-m-d H:i:s") . " "
-    . implode(", ", $_POST["selected_recipients"]) . " "
-    . implode(", ", $_POST["platforms"]) . "\n" . $contents);
-} else {
-  get_scripts();
-  $html_output =
-    "<table>\n" .
-    "<tr>\n<td>\n" .
-    "Run multiple tests at the same time for your own risk.<br />\n" .
-    "Number of available platforms: " . count($output_platforms) . ".<br />\n" .
-    "The tests will be run by `titanrt' on the selected platform(s).\n" .
-    "</td>\n</tr>\n" .
-    "<tr>\n<td>\n" .
-    get_platforms_selection($output_platforms) .
-    "</td>\n</tr>\n" .
-    "<tr>\n<td>\n" .
-    get_recipients_selection($output_recipients) .
-    "</td>\n</tr>\n" .
-    "<tr>\n<td>\n" .
-    get_tests_selection() .
-    "</td>\n</tr>\n" .
-    "<tr>\n<td class=\"history\">\n" .
-    get_history() .
-    "</td>\n</tr>\n" .
-    "</table>\n";
-  echo $html_output;
-}
-?>
-</form>
-</body>
-</html>
diff --git a/etc/scripts/cfg_msg_maker.py b/etc/scripts/cfg_msg_maker.py
deleted file mode 100644
index 504635fde..000000000
--- a/etc/scripts/cfg_msg_maker.py
+++ /dev/null
@@ -1,35 +0,0 @@
-##############################################################################
-# Copyright (c) 2000-2017 Ericsson Telecom AB
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Contributors:
-#   Balasko, Jeno
-#   Delic, Adam
-#
-##############################################################################
-header = "TITAN"
-str = header + """
- ________    _____   ________     ____        __      _
-(___  ___)  (_   _) (___  ___)   (    )      /  \    / )
-    ) )       | |       ) )      / /\ \     / /\ \  / /
-   ( (        | |      ( (      ( (__) )    ) ) ) ) ) )
-    ) )       | |       ) )      )    (    ( ( ( ( ( (
-   ( (       _| |__    ( (      /  /\  \   / /  \ \/ /
-   /__\     /_____(    /__\    /__(  )__\ (_/    \__/
-"""
-encoded = ""
-for c in str:
-  code = ord(c)
-  for i in range(7,-1,-1):
-    if (code & (1<<i)): ch = "\t"
-    else: ch = " "
-    encoded += ch
-cfgfile = open('message.cfg', 'w')
-cfgfile.write("[DEFINE]\n");
-cfgfile.write("// include this cfg file or copy paste the following whitespaces into your main cfg file\n");
-cfgfile.write(encoded);
-cfgfile.write("\n// end of message\n");
-cfgfile.close();
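Each character of the banner above is emitted as eight whitespace characters
(tab for a 1 bit, space for a 0 bit, most significant bit first).  A sketch of
the inverse transformation, for illustration only:

    def decode_whitespace(payload):
        # `payload' is the run of tabs and spaces written between the [DEFINE]
        # header and the "// end of message" comment.
        text = ''
        for i in range(0, len(payload) - len(payload) % 8, 8):
            code = 0
            for ch in payload[i:i + 8]:
                code = (code << 1) | (1 if ch == '\t' else 0)
            text += chr(code)
        return text

    # decode_whitespace(' \t \t \t  ') == 'T'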
diff --git a/etc/scripts/tpd_graph_xml2dot.py b/etc/scripts/tpd_graph_xml2dot.py
deleted file mode 100644
index 6a64f881e..000000000
--- a/etc/scripts/tpd_graph_xml2dot.py
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2000-2017 Ericsson Telecom AB
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Contributors:
-#   Balasko, Jeno
-#   Delic, Adam
-#
-##############################################################################
-import xml.etree.ElementTree as ET
-tree = ET.parse('project_hierarchy_graph.xml')
-root = tree.getroot()
-f = open('project_hierarchy_graph.dot', 'w')
-f.write("digraph PROJECT_HIERARCHY_GRAPH {\n")
-for project in root:
-	for reference in project:
-		f.write(project.attrib['name'])
-		f.write(" -> ")
-		f.write(reference.attrib['name'])
-		f.write(";\n")
-f.write("}\n")
-f.close()
-
-# use this to generate graph:
-# > dot -Tpng project_hierarchy_graph.dot -o project_hierarchy_graph.png
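A self-contained sketch of the transformation (the element and project names
below are invented; the script only relies on the `name' attributes, not on
the tag names):

    import xml.etree.ElementTree as ET
    root = ET.fromstring(
        '<project_hierarchy_graph>'
        '<project name="MainProject"><reference name="CommonLib"/></project>'
        '</project_hierarchy_graph>')
    for project in root:
        for reference in project:
            print '%s -> %s;' % (project.attrib['name'], reference.attrib['name'])
    # prints "MainProject -> CommonLib;", which the script wraps in
    # "digraph PROJECT_HIERARCHY_GRAPH { ... }".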
-- 
GitLab