-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathmain.py
More file actions
executable file
·143 lines (120 loc) · 5.42 KB
/
main.py
File metadata and controls
executable file
·143 lines (120 loc) · 5.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Given a system configuration, a workload description, and a scheduler as
# generated by `scheduler', this program executes jobs according to the scheduler's decisions.
#
# Copyright © 2017 Marcelo Amaral <marcelo.amaral@bsc.es>
from __future__ import division
import os
import json
import logging
import configparser
from collections import OrderedDict
from datetime import datetime
from time import sleep
from time import time
from src.system.enforcement import executor
from src.system import system as sched_system
def collect_metric(start_time, interval, output_folder):
    """Launch the system-metrics collection script as a background job.

    Builds the command line for ``collect_metrics.sh`` -- the experiment
    start timestamp, the sampling interval (seconds), and the destination
    log directory -- and hands it to the executor to run detached.
    """
    script = os.path.join(
        os.getcwd(),
        "data/workload-manifest/scripts_to_collect_metrics/collect_metrics.sh")
    log_dir = output_folder + "/logs/"
    command = [
        script,
        str(start_time),  # experiment start timestamp
        str(interval),    # Monitoring Interval
        log_dir,          # Directory for the output
    ]
    executor.create_job(command, background=True)
def kill_backgroud_process(output_folder):
    """Kill every background process started for this run.

    Executes ``etc/kill_all.sh`` with the run's log directory as its only
    argument.  NOTE(review): the misspelled function name ("backgroud") is
    kept intact because callers elsewhere in this file use it.
    """
    kill_script = os.path.join(os.getcwd(), "etc/kill_all.sh")
    log_dir = output_folder + "/logs/"  # Directory for the output
    executor.create_job([kill_script, log_dir], background=True)
# ---------------------------------------------------------------------------
# Script entry point: discover every scheduler configuration file under
# etc/configs/ and run one experiment (real or simulated) per configuration.
# ---------------------------------------------------------------------------
cwd = os.getcwd()
sys_config = configparser.ConfigParser(delimiters=("="))
path = os.path.join(cwd, "etc/configs/sys-config.ini")
sys_config.read(path)

# Collect every config file whose name contains 'sched', recursively.
config_files = list()
path = os.path.join(cwd, "etc/configs/")
# BUG FIX: `print path` is Python-2 syntax; the file otherwise targets
# Python 3 (it imports `configparser`), so use the print() function.
print(path)
for root, directories, files in os.walk(path):
    print(root)
    for file_name in files:  # renamed: `file` shadowed the builtin
        if 'sched' in file_name:
            # BUG FIX: was `root + file`, which drops the path separator
            # for files found in subdirectories; os.path.join is correct.
            config_files.append(os.path.join(root, file_name))

"""Run multiple simulations accordingly to the available schedule config files"""
d = datetime.now()
day = d.strftime("%d-%m-%y")
# Tag the results folder with the execution mode (real cluster vs simulator).
if not sys_config.getboolean("simulator", "enabled"):
    day += "-real"
else:
    day += "-simu"

num_machines = json.loads(sys_config.get("system", "num_machines"))
for num, config_file in enumerate(config_files):
    start_time = time()
    # GENERATE WORKLOAD -- uncomment the next line if it necessary to create the workloads
    # work_gen.gen_jobs(seed=1234, num_jobs=100, lamb_job=4, lamb_work=2)
    workload_file = json.loads(sys_config.get("workload", "workload_file"))
    path = os.path.join(cwd, "data/" + workload_file + ".json")
    # BUG FIX: close the workload file deterministically
    # (was `json.load(open(path))`, which leaked the handle).
    with open(path) as workload_fp:
        jobs = json.load(workload_fp)
    jobs = OrderedDict(sorted(jobs.items(), key=lambda t: t[1]))  # job IDs indexed by arrival time

    # OUTPUT FOLDER -- one results directory per (workload, scheduler) pair.
    sched_config = configparser.ConfigParser(delimiters=("="))
    sched_config.read(config_file)
    algo_name = json.loads(sched_config.get("scheduler", "sched_type"))
    if "utilityaware" in algo_name:
        # Utility-aware schedulers carry extra knobs that must appear in the
        # folder name so runs with different policies do not collide.
        policy = json.loads(sched_config.get("scheduler", "policy"))
        postpone = json.loads(sched_config.get("scheduler", "postpone"))
        folder = ("results/" + workload_file + "-machines-" + str(num_machines) + "/" +
                  str(day) + "/algo-" + algo_name + "-policy-" + policy +
                  "-postponed-" + str(postpone) + "/")
    else:
        folder = ("results/" + workload_file + "-machines-" + str(num_machines) + "/" +
                  str(day) + "/algo-" + algo_name + "/")
    result_folder = os.path.join(cwd, folder)
    if not os.path.exists(result_folder):
        os.makedirs(result_folder)

    # CONFIGURE THE LOGGING
    logging.basicConfig(format="%(message)s", level=logging.DEBUG)
    logger = logging.getLogger('simulator')
    logger.setLevel(logging.INFO)
    # BUG FIX: drop handlers left over from previous loop iterations;
    # otherwise every run after the first logs each message multiple times
    # and keeps writing into the previous run's log file.
    for old_handler in list(logger.handlers):
        logger.removeHandler(old_handler)
        old_handler.close()
    fh = logging.FileHandler(result_folder + 'simulation.log')
    fh.setLevel(logging.INFO)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)

    # START THE SYSTEM METRICS COLLECTOR (real executions only)
    if not sys_config.getboolean("simulator", "enabled"):
        interval = 5
        collect_metric(start_time, interval, result_folder)

    # START THE SYSTEM
    system = sched_system.System(sys_config, sched_config, result_folder, logger, start_time)
    system.start(jobs)

    # SAVE STATISTICS
    with open(result_folder + "placement_stats.json", "w+") as out:
        json.dump(system.placement_stats, out, sort_keys=True, indent=2, separators=(',', ':'))
    with open(result_folder + "system_stats.json", "w+") as out:
        json.dump(system.system_stats, out, sort_keys=True, indent=2, separators=(',', ':'))
    with open(result_folder + "sched_stats.json", "w+") as out:
        json.dump(system.sched_stats, out, sort_keys=True, indent=2, separators=(',', ':'))
    with open(result_folder + "ending.out", "w+") as out:
        out.write("The simulation was executed in " + str(time() - start_time) + " seconds \n")
        out.write("Config file " + str(num) + " " + config_file + "\n")
    logger.info("The simulation was executed in " + str(time() - start_time) + " seconds")
    logger.info("Config file " + str(num) + " " + config_file)

    # STOP THE METRIC COLLECTOR
    if not sys_config.getboolean("simulator", "enabled"):
        kill_backgroud_process(result_folder)

# BUG FIX: was exit(1); a run that completed all configurations should
# report success to the invoking shell, not failure.
exit(0)