source: trunk/platforms/tsar_generic_xbar/scripts/create_graphs.py @ 1023

Last change on this file since 1023 was 1012, checked in by meunier, 9 years ago
  • Update of simulation scripts for tsar_generic_xbar
  • Property svn:executable set to *
File size: 32.4 KB
#!/usr/bin/python

import subprocess
import os
import re
import sys


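# Note: this script relies on Python 2 constructs (print statements, exec on a
# file object, dict.has_key()); it is not expected to run under Python 3 as is.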
#apps = [ 'histo-opt', 'mandel', 'filt_ga', 'radix_ga', 'fft_ga', 'pca-opt', 'fft', 'radix', 'filter', 'kmeans-opt' ]
apps = [ 'fft_ga', 'filt_ga', 'lu', 'radix_ga', 'histo-opt', 'mandel', 'pca-opt', 'kmeans-opt' ]
#apps = [ 'histogram', 'mandel', 'filter', 'fft', 'fft_ga', 'filt_ga', 'pca', 'lu' ]  # radix radix_ga kmeans
#apps = [ 'histo-opt', 'histogram2', 'histo-opt2' ]
#nb_procs = [ 1, 4, 8, 16, 32, 64, 128, 256 ]
nb_procs = [ 1, 4, 8, 16, 32, 64, 128, 256 ]
single_protocols = ['dhccp', 'hmesi']
joint_protocols = ['dhccp', 'hmesi']
#joint_protocols = []

top_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
scripts_path = os.path.join(top_path, 'scripts')
counter_defs_name = os.path.join(scripts_path, "counter_defs.py")

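# counter_defs.py is expected to define the tables used throughout this script:
# all_metrics, all_tags, m_metric_tag, m_metric_id, m_metric_name, m_metric_norm,
# m_norm_factor_name, m_app_name, m_prot_name, grouped_metrics, individual_metrics,
# stacked_metrics and colors.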
exec(file(counter_defs_name))

gen_dir = 'generated'
graph_dir = 'graph'
template_dir = 'templates'
data_dir = 'data'

log_stdo_name = '_stdo_'
log_term_name = '_term_'

coherence_tmpl = os.path.join(scripts_path, template_dir, 'coherence_template.gp') # 1 graph per application
speedup_tmpl   = os.path.join(scripts_path, template_dir, 'speedup_template.gp')
metric_tmpl    = os.path.join(scripts_path, template_dir, 'metric_template.gp') # 1 graph per metric
stacked_tmpl   = os.path.join(scripts_path, template_dir, 'stacked_template.gp')



def create_file(name, content):
   file = open(name, 'w')
   file.write(content)
   file.close()

def is_numeric(s):
   try:
      float(s)
      return True
   except ValueError:
      return False

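# get_x_y returns the smallest mesh (x, y) such that x * y * 4 >= nb_procs,
# growing x and y alternately (the factor 4 apparently corresponds to 4
# processors per cluster). For example, get_x_y(32) returns (4, 2).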
def get_x_y(nb_procs):
   x = 1
   y = 1
   to_x = True
   while (x * y * 4 < nb_procs):
      if to_x:
         x = x * 2
      else:
         y = y * 2
      to_x = not to_x
   return x, y



# First, fill the m_metric_id table: map each counter tag back to its metric
for metric in all_metrics:
   for tag in all_tags:
      if m_metric_tag[metric] == tag:
         m_metric_id[tag] = metric
         break


# Process all the log files:
# - term files are parsed for the execution time only
# - stdo ("init") files are parsed for all the counter metrics
exec_time = {}
metrics_val = {}
for prot in joint_protocols:
   metrics_val[prot] = {}
   exec_time[prot] = {}
   for app in apps:
      exec_time[prot][app] = {}
      metrics_val[prot][app] = {}
      for i in nb_procs:
         metrics_val[prot][app][i] = {}
         log_stdo_file = os.path.join(scripts_path, data_dir, app + '_' + prot + log_stdo_name + str(i))
         log_term_file = os.path.join(scripts_path, data_dir, app + '_' + prot + log_term_name + str(i))

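         # Term logs are assumed to end the line of interest with the cycle count,
         # e.g. "[PARALLEL_COMPUTE] <nb_cycles>"; only the last token is kept.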
         # Term log
         lines = open(log_term_file, 'r')
         for line in lines:
            tokens = line[:-1].split()
            if len(tokens) > 0 and tokens[0] == "[PARALLEL_COMPUTE]":
               exec_time[prot][app][i] = int(tokens[len(tokens) - 1])

         # Init (stdo) file
         lines = open(log_stdo_file, 'r')
         for line in lines:
            tokens = line[:-1].split()
            if len(tokens) == 0:
               continue
            tag = tokens[0]
            value = tokens[len(tokens) - 1]
            pattern = re.compile('\[0[0-9][0-9]\]')
            if pattern.match(tag):
               metric = m_metric_id[tag]
               if (not metrics_val[prot][app][i].has_key(metric) or tag == "[000]" or tag == "[001]"):
                  # Don't sum the cycle counters of all memory caches (they must be the same for all of them)
                  metrics_val[prot][app][i][metric] = int(value)
               else:
                  metrics_val[prot][app][i][metric] += int(value)

# Set metrics that are absent from the data files to 0
for prot in joint_protocols:
   for app in apps:
      for i in nb_procs:
         for metric in all_metrics:
            if metric not in metrics_val[prot][app][i]:
               metrics_val[prot][app][i][metric] = 0

# Second pass: fill the derived fields, e.g. total_update
for prot in joint_protocols:
   for app in apps:
      for i in nb_procs:
         x, y = get_x_y(i)
         metrics_val[prot][app][i]['total_read']      = metrics_val[prot][app][i]['local_read']      + metrics_val[prot][app][i]['remote_read']
         metrics_val[prot][app][i]['total_write']     = metrics_val[prot][app][i]['local_write']     + metrics_val[prot][app][i]['remote_write']
         metrics_val[prot][app][i]['total_ll']        = metrics_val[prot][app][i]['local_ll']        + metrics_val[prot][app][i]['remote_ll']
         metrics_val[prot][app][i]['total_sc']        = metrics_val[prot][app][i]['local_sc']        + metrics_val[prot][app][i]['remote_sc']
         metrics_val[prot][app][i]['total_cas']       = metrics_val[prot][app][i]['local_cas']       + metrics_val[prot][app][i]['remote_cas']
         metrics_val[prot][app][i]['total_update']    = metrics_val[prot][app][i]['local_update']    + metrics_val[prot][app][i]['remote_update']
         metrics_val[prot][app][i]['total_m_inv']     = metrics_val[prot][app][i]['local_m_inv']     + metrics_val[prot][app][i]['remote_m_inv']
         metrics_val[prot][app][i]['total_cleanup']   = metrics_val[prot][app][i]['local_cleanup']   + metrics_val[prot][app][i]['remote_cleanup']
         metrics_val[prot][app][i]['total_cleanup_d'] = metrics_val[prot][app][i]['local_cleanup_d'] + metrics_val[prot][app][i]['remote_cleanup_d']
         metrics_val[prot][app][i]['total_getm']      = metrics_val[prot][app][i]['local_getm']      + metrics_val[prot][app][i]['remote_getm']
         metrics_val[prot][app][i]['total_inval_ro']  = metrics_val[prot][app][i]['local_inval_ro']  + metrics_val[prot][app][i]['remote_inval_ro']
         metrics_val[prot][app][i]['total_direct']    = metrics_val[prot][app][i]['total_read']      + metrics_val[prot][app][i]['total_write']
         metrics_val[prot][app][i]['total_ncc_to_cc'] = metrics_val[prot][app][i]['ncc_to_cc_read']  + metrics_val[prot][app][i]['ncc_to_cc_write']
         metrics_val[prot][app][i]['direct_cost']     = metrics_val[prot][app][i]['read_cost']       + metrics_val[prot][app][i]['write_cost']
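         # Broadcast cost is extrapolated: each broadcast is charged 2 * (x * y - 1),
         # i.e. two transactions per remote cluster in the x*y mesh (presumably the
         # invalidation request and its response).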
         metrics_val[prot][app][i]['broadcast_cost']  = metrics_val[prot][app][i]['broadcast'] * 2 * (x * y - 1)
         metrics_val[prot][app][i]['coherence_cost']  = metrics_val[prot][app][i]['broadcast_cost']  + metrics_val[prot][app][i]['m_inv_cost'] + metrics_val[prot][app][i]['update_cost']
         if metrics_val[prot][app][i]['broadcast'] < metrics_val[prot][app][i]['write_broadcast']:
            # Workaround for a former mem_cache bug
            metrics_val[prot][app][i]['nonwrite_broadcast'] = 0
            print "*** Error which should not happen anymore: incorrect number of Broadcasts/Write Broadcasts"
         else:
            metrics_val[prot][app][i]['nonwrite_broadcast'] = metrics_val[prot][app][i]['broadcast'] - metrics_val[prot][app][i]['write_broadcast']

         metrics_val[prot][app][i]['total_stacked'] = 0
         for stacked_metric in stacked_metrics:
            metrics_val[prot][app][i]['total_stacked'] += metrics_val[prot][app][i][stacked_metric]


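# Create the output directories: gen_dir holds the generated .dat and .gp files,
# graph_dir holds the resulting graphs.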
print "mkdir -p", os.path.join(scripts_path, gen_dir)
subprocess.call([ 'mkdir', '-p', os.path.join(scripts_path, gen_dir) ])

print "mkdir -p", os.path.join(scripts_path, graph_dir)
subprocess.call([ 'mkdir', '-p', os.path.join(scripts_path, graph_dir) ])

############################################################
### Graph 1 : Coherence traffic Cost per application     ###
############################################################
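# Each grouped metric is plotted as a rate: number of events per 1000 cycles
# of parallel execution time, as a function of the number of processors.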

for prot in single_protocols:
   for app in apps:
      data_coherence_name = os.path.join(scripts_path, gen_dir, prot + '_' + app + '_coherence.dat')
      gp_coherence_name   = os.path.join(scripts_path, gen_dir, prot + '_' + app + '_coherence.gp')

      # Creating the data file
      width = 15
      content = ""

      for metric in [ '#nb_procs' ] + grouped_metrics:
         content += metric + " "
         nb_spaces = width - len(metric)
         content += nb_spaces * ' '
      content += "\n"

      for i in nb_procs:
         content += "%-15d " % i
         for metric in grouped_metrics:
            val = float(metrics_val[prot][app][i][metric]) / exec_time[prot][app][i] * 1000
            content += "%-15f " % val
         content += "\n"

      create_file(data_coherence_name, content)

      # Creating the gp file
      template_file = open(coherence_tmpl, 'r')
      template = template_file.read()

      plot_str = ""
      col = 2
      for metric in grouped_metrics:
         if metric != grouped_metrics[0]:
            plot_str += ", \\\n    "
         plot_str += "\"" + data_coherence_name + "\" using ($1):($" + str(col) + ") lc rgb " + colors[col - 2] + " title \"" + m_metric_name[metric] + "\" with linespoint"
         col += 1
      gp_commands = template % dict(app_name = m_app_name[app], nb_procs = nb_procs[-1] + 1, plot_str = plot_str, svg_name = os.path.join(graph_dir, prot + '_' + app + '_coherence'))

      create_file(gp_coherence_name, gp_commands)

      # Calling gnuplot
      print "gnuplot", gp_coherence_name
      subprocess.call([ 'gnuplot', gp_coherence_name ])


############################################################
### Graph 2 : Speedup per Application                    ###
############################################################

for prot in single_protocols:
   for app in apps:

      data_speedup_name = os.path.join(scripts_path, gen_dir, prot + '_' + app + '_speedup.dat')
      gp_speedup_name   = os.path.join(scripts_path, gen_dir, prot + '_' + app + '_speedup.gp')

      # Creating data file
      width = 15
      content = "#nb_procs"
      nb_spaces = width - len(content)
      content += nb_spaces * ' '
      content += "speedup\n"

      for i in nb_procs:
         content += "%-15d " % i
         val = exec_time[prot][app][i]
         content += "%-15f\n" % (exec_time[prot][app][1] / float(val))

      plot_str = "\"" + data_speedup_name + "\" using ($1):($2) lc rgb \"#654387\" title \"Speedup\" with linespoint"

      create_file(data_speedup_name, content)

      # Creating the gp file
      template_file = open(speedup_tmpl, 'r')
      template = template_file.read()

      gp_commands = template % dict(appli = m_app_name[app], nb_procs = nb_procs[-1] + 1, plot_str = plot_str, svg_name = os.path.join(graph_dir, prot + '_' + app + '_speedup'))

      create_file(gp_speedup_name, gp_commands)

      # Calling gnuplot
      print "gnuplot", gp_speedup_name
      subprocess.call([ 'gnuplot', gp_speedup_name ])


############################################################
### Graph 3 : All speedups on the same Graph             ###
############################################################

for prot in single_protocols:
   # This graph uses the same template as graph 2
   data_speedup_name = os.path.join(scripts_path, gen_dir, prot + '_all_speedup.dat')
   gp_speedup_name   = os.path.join(scripts_path, gen_dir, prot + '_all_speedup.gp')

   # Creating data file
   width = 15
   content = "#nb_procs"
   nb_spaces = width - len(content)
   content += (nb_spaces + 1) * ' '
   for app in apps:
      content += app + " "
      content += (width - len(app)) * " "
   content += "\n"

   for i in nb_procs:
      content += "%-15d " % i
      for app in apps:
         val = exec_time[prot][app][i]
         content += "%-15f " % (exec_time[prot][app][1] / float(val))
      content += "\n"

   create_file(data_speedup_name, content)

   # Creating gp file
   template_file = open(speedup_tmpl, 'r')
   template = template_file.read()

   plot_str = ""
   col = 2
   for app in apps:
      if app != apps[0]:
         plot_str += ", \\\n     "
      plot_str += "\"" + data_speedup_name + "\" using ($1):($" + str(col) + ") lc rgb %s title \"" % (colors[col - 2])  + m_app_name[app] + "\" with linespoint"
      col += 1

   gp_commands = template % dict(appli = "All Applications", nb_procs = nb_procs[-1] + 1, plot_str = plot_str, svg_name = os.path.join(graph_dir, prot + '_all_speedup'))

   create_file(gp_speedup_name, gp_commands)

   # Calling gnuplot
   print "gnuplot", gp_speedup_name
   subprocess.call([ 'gnuplot', gp_speedup_name ])


############################################################
### Graph 4 : Graph per metric                           ###
############################################################

# The following section creates one graph per metric (e.g. number of broadcasts).
# The gnuplot commands cannot easily stay generic in a template file,
# so most of them are generated here.
# A graph is created for each metric in the "individual_metrics" list.
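# The m_metric_norm code of a metric (defined in counter_defs.py) selects how its
# values are normalized below:
#   N: raw count, P: per processor, C: per 1000 cycles, W: per write,
#   R: per read, D: per direct (read+write) request,
#   a number: relative to the value measured on that number of processors.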

for prot in single_protocols:
   for metric in individual_metrics:
      data_metric_name = os.path.join(scripts_path, gen_dir, prot + '_' + metric + '.dat')
      gp_metric_name   = os.path.join(scripts_path, gen_dir, prot + '_' + metric + '.gp')

      # Creating the gp file
      # Setting xtics, i.e. number of procs for each application
      xtics_str = "("
      first = True
      xpos = 1
      app_labels = ""
      for num_appli in range(0, len(apps)):
         for i in nb_procs:
            if not first:
               xtics_str += ", "
            first = False
            if i == nb_procs[0]:
               xpos_first = xpos
            xtics_str += "\"%d\" %.1f" % (i, xpos)
            xpos_last = xpos
            xpos += 1.5
         xpos += 0.5
         app_name_xpos = float((xpos_first + xpos_last)) / 2
         app_labels += "set label \"%s\" at first %f,character 1 center font \"Times,12\"\n" % (m_app_name[apps[num_appli]], app_name_xpos)
      xtics_str += ")"

      xmax_val = float(xpos - 1)

      # Writing the lines of "plot"
      plot_str = ""
      xpos = 0
      first = True
      column = 2
      for i in range(0, len(nb_procs)):
         if not first:
            plot_str += ", \\\n    "
         first = False
         plot_str += "\"%s\" using ($1+%.1f):($%d) lc rgb %s notitle with boxes" % (data_metric_name, xpos, column, colors[i])
         column += 1
         xpos += 1.5

      template_file = open(metric_tmpl, 'r')
      template = template_file.read()

      gp_commands = template % dict(xtics_str = xtics_str, app_labels = app_labels, ylabel_str = m_metric_name[metric], norm_factor_str = m_norm_factor_name[m_metric_norm[metric]], xmax_val = xmax_val, plot_str = plot_str, svg_name = os.path.join(graph_dir, prot + '_' + metric))

      create_file(gp_metric_name, gp_commands)

      # Creating the data file
      width = 15
      content = "#x_pos"
      nb_spaces = width - len(content)
      content += nb_spaces * ' '
      for i in nb_procs:
         content += "%-15d" % i
      content += "\n"

      x_pos = 1
      for app in apps:
         # Computation of x_pos
         content += "%-15f" % x_pos
         x_pos += len(nb_procs) * 1.5 + 0.5
         for i in nb_procs:
            if m_metric_norm[metric] == "N":
               content += "%-15d" % (metrics_val[prot][app][i][metric])
            elif m_metric_norm[metric] == "P":
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / i)
            elif m_metric_norm[metric] == "C":
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / exec_time[prot][app][i] * 1000)
            elif m_metric_norm[metric] == "W":
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / float(metrics_val[prot][app][i]['total_write'])) # Number of writes
            elif m_metric_norm[metric] == "R":
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / float(metrics_val[prot][app][i]['total_read'])) # Number of reads
            elif m_metric_norm[metric] == "D":
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / float(metrics_val[prot][app][i]['total_direct'])) # Number of req.
            elif is_numeric(m_metric_norm[metric]):
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / float(metrics_val[prot][app][int(m_metric_norm[metric])][metric]))
            else:
               assert(False)

         app_name = m_app_name[app]
         content += "#" + app_name + "\n"

      create_file(data_metric_name, content)

      # Calling gnuplot
      print "gnuplot", gp_metric_name
      subprocess.call([ 'gnuplot', gp_metric_name ])


############################################################
### Graph 5 : Stacked histogram with counters            ###
############################################################

# The following section creates a stacked histogram containing
# the metrics in the "stacked_metrics" list.
# It is normalized per application w.r.t. the values on the largest
# processor count (nb_procs[-1]).
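# The rows of zeros written between applications presumably act as separators
# (empty bars) between groups in the histogram.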

for prot in single_protocols:
   data_stacked_name = os.path.join(scripts_path, gen_dir, prot + '_stacked.dat')
   gp_stacked_name   = os.path.join(scripts_path, gen_dir, prot + '_stacked.gp')

   norm_factor_value = nb_procs[-1]

   # Creating the gp file
   template_file = open(stacked_tmpl, 'r')
   template = template_file.read()

   xtics_str = "("
   first = True
   xpos = 1
   app_labels = ""
   for num_appli in range(0, len(apps)):
      for i in nb_procs[1:len(nb_procs)]: # skipping values for 1 proc
         if not first:
            xtics_str += ", "
         first = False
         if i == nb_procs[1]:
            xpos_first = xpos
         xtics_str += "\"%d\" %d -1" % (i, xpos)
         xpos_last = xpos
         xpos += 1
      xpos += 1
      app_name_xpos = float((xpos_first + xpos_last)) / 2
      app_labels += "set label \"%s\" at first %f,character 1 center font \"Times,12\"\n" % (m_app_name[apps[num_appli]], app_name_xpos)
   xtics_str += ")"

   plot_str = "newhistogram \"\""
   n = 1
   for stacked_metric in stacked_metrics:
      plot_str += ", \\\n    " + "'" + data_stacked_name + "'" + " using " + str(n) + " lc rgb " + colors[n] + " title \"" + m_metric_name[stacked_metric] + "\""
      n += 1

   ylabel_str = "Breakdown of Coherence Traffic Normalized w.r.t. \\nthe Values on %d Processors" % norm_factor_value
   content = template % dict(svg_name = os.path.join(graph_dir, prot + '_stacked'), xtics_str = xtics_str, plot_str = plot_str, ylabel_str = ylabel_str, app_labels = app_labels, prot_labels = "")

   create_file(gp_stacked_name, content)

   # Creating the data file
   # Values are normalized per application, w.r.t. the total stacked traffic on the largest processor count
   content = "#"
   for stacked_metric in stacked_metrics:
      content += stacked_metric
      content += ' ' + ' ' * (15 - len(stacked_metric))
   content += "\n"
   for app in apps:
      if app != apps[0]:
         for i in range(0, len(stacked_metrics)):
            content += "%-15f" % 0.0
         content += "\n"
      for i in nb_procs[1:len(nb_procs)]:
         for stacked_metric in stacked_metrics:
            metric_val = metrics_val[prot][app][norm_factor_value]['total_stacked'] # Normalization factor
            if metric_val != 0:
               content += "%-15f" % (float(metrics_val[prot][app][i][stacked_metric]) / metric_val)
            else:
               content += "%-15f" % 0
         content += "\n"

   create_file(data_stacked_name, content)
   # Calling gnuplot
   print "gnuplot", gp_stacked_name
   subprocess.call([ 'gnuplot', gp_stacked_name ])



#################################################################################
### Graph 6 : Stacked histogram with coherence cost compared to r/w cost      ###
#################################################################################

# The following section creates, for each application and processor count, a pair
# of stacked histograms, normalized w.r.t. the first one: the first bar contains
# the cost of reads and writes, the second contains the cost of m_inv, updates
# and broadcasts (extrapolated).
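# Configurations with 4 processors or fewer are skipped (if i > 4), and both bars
# of a pair are normalized by the direct request cost of that configuration.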

for prot in single_protocols:
   data_cost_filename = os.path.join(scripts_path, gen_dir, prot + '_relative_cost.dat')
   gp_cost_filename   = os.path.join(scripts_path, gen_dir, prot + '_relative_cost.gp')

   direct_cost_metrics = [ 'read_cost', 'write_cost' ]
   #coherence_cost_metrics = ['update_cost', 'm_inv_cost', 'broadcast_cost' ]
   coherence_cost_metrics = ['coherence_cost']

   # Creating the gp file
   template_file = open(stacked_tmpl, 'r')
   template = template_file.read()

   xtics_str = "("
   first = True
   xpos = 1
   app_labels = ""
   for num_appli in range(0, len(apps)):
      first_proc = True
      for i in nb_procs:
         if i > 4:
            if not first:
               xtics_str += ", "
            first = False
            if first_proc:
               first_proc = False
               xpos_first = xpos
            xtics_str += "\"%d\" %f -1" % (i, float(xpos + 0.5))
            xpos_last = xpos
            xpos += 3
      app_name_xpos = float((xpos_first + xpos_last)) / 2
      app_labels += "set label \"%s\" at first %f,character 1 center font \"Times,12\"\n" % (m_app_name[apps[num_appli]], app_name_xpos)
      #xpos += 1
   xtics_str += ")"

   plot_str = "newhistogram \"\""
   n = 1
   for cost_metric in direct_cost_metrics + coherence_cost_metrics:
      plot_str += ", \\\n    " + "'" + data_cost_filename + "'" + " using " + str(n) + " lc rgb " + colors[n] + " title \"" + m_metric_name[cost_metric] + "\""
      n += 1

   ylabel_str = "Coherence Cost Compared to Direct Requests Cost,\\nNormalized per Application for each Number of Processors"
   content = template % dict(svg_name = os.path.join(graph_dir, prot + '_rel_cost'), xtics_str = xtics_str, plot_str = plot_str, ylabel_str = ylabel_str, app_labels = app_labels, prot_labels = "")

   create_file(gp_cost_filename, content)

   # Creating the data file
   # Values are normalized per application, w.r.t. the direct request cost for each number of procs
   content = "#"
   for cost_metric in direct_cost_metrics:
      content += cost_metric
      content += ' ' + ' ' * (15 - len(cost_metric))
   for cost_metric in coherence_cost_metrics:
      content += cost_metric
      content += ' ' + ' ' * (15 - len(cost_metric))
   content += "\n"
   for app in apps:
      if app != apps[0]:
         for i in range(0, len(direct_cost_metrics) + len(coherence_cost_metrics)):
            content += "%-15f" % 0.0
         content += "\n"
      for i in nb_procs:
         if i > 4:
            for cost_metric in direct_cost_metrics:
               if metrics_val[prot][app][i]['direct_cost'] == 0:
                  print "Error: prot : ", prot, " - app : ", app, " - i : ", i
                  content += "%-15f" % 0
               else:
                  content += "%-15f" % (float(metrics_val[prot][app][i][cost_metric]) / metrics_val[prot][app][i]['direct_cost'])
            for cost_metric in coherence_cost_metrics:
               content += "%-15f" % 0.0
            content += "\n"
            for cost_metric in direct_cost_metrics:
               content += "%-15f" % 0.0
            for cost_metric in coherence_cost_metrics:
               if metrics_val[prot][app][i]['direct_cost'] == 0:
                  print "Error: prot : ", prot, " - app : ", app, " - i : ", i
                  content += "%-15f" % 0
               else:
                  content += "%-15f" % (float(metrics_val[prot][app][i][cost_metric]) / metrics_val[prot][app][i]['direct_cost'])
            content += "\n"
            if i != nb_procs[-1]:
               for j in range(0, len(direct_cost_metrics) + len(coherence_cost_metrics)):
                  content += "%-15f" % 0.0
               content += "\n"

   create_file(data_cost_filename, content)
   # Calling gnuplot
   print "gnuplot", gp_cost_filename
   subprocess.call([ 'gnuplot', gp_cost_filename ])


#################################################################################
### Joint graphs comparing several architectures                              ###
#################################################################################

if len(joint_protocols) == 0:
   sys.exit()

#################################################################################
### Graph 7: Comparison of Speedups (normalized w.r.t. 1 proc on first arch)  ###
#################################################################################
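# For each application, the speedups of every joint protocol are plotted on the
# same graph, all normalized w.r.t. the single-processor execution time of the
# first joint protocol.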


for app in apps:

   data_speedup_name = os.path.join(scripts_path, gen_dir, 'joint_' + app + '_speedup.dat')
   gp_speedup_name   = os.path.join(scripts_path, gen_dir, 'joint_' + app + '_speedup.gp')

   # Creating data file
   width = 15
   content = "#nb_procs"
   nb_spaces = width - len(content)
   content += nb_spaces * ' '
   content += "speedup\n"

   for i in nb_procs:
      content += "%-15d " % i
      for prot in joint_protocols:
         val = exec_time[prot][app][i]
         content += "%-15f " % (exec_time[joint_protocols[0]][app][1] / float(val))
      content += "\n"

   create_file(data_speedup_name, content)

   # Creating the gp file
   template_file = open(speedup_tmpl, 'r')
   template = template_file.read()

   plot_str = ""
   col = 2
   for prot in joint_protocols:
      if prot != joint_protocols[0]:
         plot_str += ", \\\n     "
      plot_str += "\"" + data_speedup_name + "\" using ($1):($" + str(col) + ") lc rgb %s title \"" % (colors[col - 2])  + m_prot_name[prot] + "\" with linespoint"
      col += 1

   gp_commands = template % dict(appli = m_app_name[app] + " Normalized w.r.t. " + m_prot_name[joint_protocols[0]] + " on 1 Processor", nb_procs = nb_procs[-1] + 1, plot_str = plot_str, svg_name = os.path.join(graph_dir, 'joint_' + app + '_speedup'))

   create_file(gp_speedup_name, gp_commands)

   # Calling gnuplot
   print "gnuplot", gp_speedup_name
   subprocess.call([ 'gnuplot', gp_speedup_name ])


#################################################################################
### Graph 8 : Joint Stacked histogram with coherence cost and r/w cost        ###
#################################################################################

# The following section creates, for each application and each number of processors,
# one pair of stacked histograms per architecture (protocol): first the direct (R/W)
# request cost, then the coherence cost. All bars of a group are normalized w.r.t.
# the direct request cost of the first architecture for that application and
# processor count. It is close to Graph 6.
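# If the first protocol's direct cost is zero for a given (app, nb_procs) pair,
# no bars are emitted for that group.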

data_cost_filename = os.path.join(scripts_path, gen_dir, 'joint_relative_cost.dat')
gp_cost_filename   = os.path.join(scripts_path, gen_dir, 'joint_relative_cost.gp')

direct_cost_metrics = [ 'read_cost', 'write_cost', 'getm_cost' ]
coherence_cost_metrics = ['update_cost', 'm_inv_cost', 'broadcast_cost', 'inval_ro_cost', 'cleanup_cost', 'cleanup_d_cost' ]

# Creating the gp file
template_file = open(stacked_tmpl, 'r')
template = template_file.read()

xtics_str = "("
first = True
xpos = 1 # successive x position of the center of the first bar in an application
app_labels = ""
prot_labels = ""
for num_appli in range(0, len(apps)):
   first_proc = True
   for i in nb_procs:
      if i > 4:
         x = 0 # local var for computing position of protocol names
         for prot in joint_protocols:
            prot_labels += "set label \"%s\" at first %f, character 2 center font \"Times,10\"\n" % (m_prot_name[prot], float((xpos - 0.5)) + x) # -0.5 instead of +0.5, don't know why... (gnuplot bug?)
            x += 2

         if not first:
            xtics_str += ", "
         first = False
         if first_proc:
            first_proc = False
            xpos_first = xpos
         xtics_str += "\"%d\" %f -1" % (i, float(xpos - 0.5 + len(joint_protocols)))
         xpos_last = xpos
         xpos += 1 + len(joint_protocols) * 2
   app_name_xpos = float((xpos_first + xpos_last)) / 2
   app_labels += "set label \"%s\" at first %f,character 1 center font \"Times,12\"\n" % (m_app_name[apps[num_appli]], app_name_xpos)
   xpos += 1
xtics_str += ")"

plot_str = "newhistogram \"\""
n = 1
for cost_metric in direct_cost_metrics + coherence_cost_metrics:
   plot_str += ", \\\n    " + "'" + data_cost_filename + "'" + " using " + str(n) + " lc rgb " + colors[n] + " title \"" + m_metric_name[cost_metric] + "\""
   n += 1

ylabel_str = "Coherence Cost and Direct Requests Cost,\\nNormalized per Application for each Number of Processors"
content = template % dict(svg_name = os.path.join(graph_dir, 'joint_rel_cost'), xtics_str = xtics_str, plot_str = plot_str, ylabel_str = ylabel_str, app_labels = app_labels, prot_labels = prot_labels)

create_file(gp_cost_filename, content)

# Creating the data file
# Values are normalized per application and per number of procs, w.r.t. the direct request cost of the first protocol
content = "#"
for cost_metric in direct_cost_metrics:
   content += cost_metric
   content += ' ' + ' ' * (15 - len(cost_metric))
for cost_metric in coherence_cost_metrics:
   content += cost_metric
   content += ' ' + ' ' * (15 - len(cost_metric))
content += "\n"
for app in apps:
   if app != apps[0]:
      for j in range(0, len(direct_cost_metrics) + len(coherence_cost_metrics)):
         content += "%-15f" % 0.0
      content += "\n"
   for i in nb_procs:
      if i > 4:
         for prot in joint_protocols:
            if metrics_val[joint_protocols[0]][app][i]['direct_cost'] == 0:
               continue
            for cost_metric in direct_cost_metrics:
               content += "%-15f" % (float(metrics_val[prot][app][i][cost_metric]) / metrics_val[joint_protocols[0]][app][i]['direct_cost'])
            for cost_metric in coherence_cost_metrics:
               content += "%-15f" % 0.0
            content += "\n"
            for cost_metric in direct_cost_metrics:
               content += "%-15f" % 0.0
            for cost_metric in coherence_cost_metrics:
               content += "%-15f" % (float(metrics_val[prot][app][i][cost_metric]) / metrics_val[joint_protocols[0]][app][i]['direct_cost'])
            content += "\n"
         if i != nb_procs[-1]:
            for j in range(0, len(direct_cost_metrics) + len(coherence_cost_metrics)):
               content += "%-15f" % 0.0
            content += "\n"

create_file(data_cost_filename, content)
# Calling gnuplot
print "gnuplot", gp_cost_filename
subprocess.call([ 'gnuplot', gp_cost_filename ])



#################################################################################
### Graph 9 : Single metric comparison between protocols                      ###
#################################################################################
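# The following section plots a single metric (here 'total_write') side by side
# for each protocol, per application and per number of processors.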


data_metric_filename = os.path.join(scripts_path, gen_dir, 'single_metric.dat')
gp_metric_filename   = os.path.join(scripts_path, gen_dir, 'single_metric.gp')

metric = 'total_write'

# Creating the gp file
template_file = open(stacked_tmpl, 'r')
template = template_file.read()

xtics_str = "("
first = True
xpos = 0 # successive x position of the center of the first bar in an application
app_labels = ""
prot_labels = ""
for num_appli in range(0, len(apps)):
   first_proc = True
   for i in nb_procs:
      x = 0 # local var for computing position of protocol names
      #for prot in joint_protocols:
         #prot_labels += "set label \"%s\" at first %f, character 2 center font \"Times,10\"\n" % (m_prot_name[prot], float((xpos - 0.5)) + x) # -0.5 instead of +0.5, don't know why... (bug gnuplot?)
         #x += 1

      if not first:
         xtics_str += ", "
      first = False
      if first_proc:
         first_proc = False
         xpos_first = xpos
      xtics_str += "\"%d\" %f -1" % (i, float(xpos - 0.5 + len(joint_protocols)))
      xpos_last = xpos
      xpos += 1 + len(joint_protocols)
   app_name_xpos = float((xpos_first + xpos_last)) / 2
   app_labels += "set label \"%s\" at first %f,character 1 center font \"Times,12\"\n" % (m_app_name[apps[num_appli]], app_name_xpos)
   xpos += 1
xtics_str += ")"

n = 1
plot_str = "newhistogram \"\""
for prot in joint_protocols:
   plot_str += ", \\\n    " + "'" + data_metric_filename + "'" + " using " + str(n) + " lc rgb " + colors[n] + " title \"" + m_metric_name[metric] + " for " + m_prot_name[prot] + "\""
   n += 1

ylabel_str = "%(m)s" % dict(m = m_metric_name[metric])
content = template % dict(svg_name = os.path.join(graph_dir, 'single_metric'), xtics_str = xtics_str, plot_str = plot_str, ylabel_str = ylabel_str, app_labels = app_labels, prot_labels = prot_labels)

create_file(gp_metric_filename, content)

# Creating the data file
content = "#" + metric
content += "\n"
for app in apps:
   if app != apps[0]:
      for prot in joint_protocols:
         for p in joint_protocols:
            content += "%-15f " % 0.0
         content += "\n"
   for i in nb_procs:
      for prot in joint_protocols:
         for p in joint_protocols:
            if p != prot:
               content += "%-15f " % 0
            else:
               content += "%-15f " % (float(metrics_val[prot][app][i][metric]))
         content += "\n"
      if i != nb_procs[-1]:
         for p in joint_protocols:
            content += "%-15f " % 0.0
         content += "\n"

create_file(data_metric_filename, content)
# Calling gnuplot
print "gnuplot", gp_metric_filename
subprocess.call([ 'gnuplot', gp_metric_filename ])
