source: trunk/platforms/tsar_generic_xbar/scripts/create_graphs.py @ 779

Last change on this file since 779 was 779, checked in by meunier, 10 years ago

Trunk:

  • Updating python scripts for simulations and graphs for tsar_generic_xbar (support for rwt and mesi)
  • Property svn:executable set to *
File size: 27.8 KB
#!/usr/bin/python

import subprocess
import os
import re
import sys


#apps = [ 'histogram', 'mandel', 'filter', 'radix', 'fft_ga' ]
apps = [ 'histogram', 'mandel', 'filter', 'radix', 'radix_ga', 'fft', 'fft_ga', 'filt_ga', 'kmeans', 'pca', 'lu' ]
#apps = [ 'fft' ]
nb_procs = [ 1, 4, 8, 16, 32, 64, 128, 256 ]
single_protocols = ['dhccp']
#joint_protocols = ['dhccp', 'rwt']
joint_protocols = []

top_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
scripts_path = os.path.join(top_path, 'scripts')
counter_defs_name = os.path.join(scripts_path, "counter_defs.py")

# Load the counter definitions (all_metrics, all_tags, m_metric_tag, m_metric_name,
# colors, ...) into the current namespace (Python 2: exec accepts an open file object)
exec(file(counter_defs_name))

gen_dir = 'generated'
graph_dir = 'graph'
template_dir = 'templates'
data_dir = 'data'

log_stdo_name = '_stdo_'
log_term_name = '_term_'

coherence_tmpl = os.path.join(scripts_path, template_dir, 'coherence_template.gp') # 1 graph per application
speedup_tmpl   = os.path.join(scripts_path, template_dir, 'speedup_template.gp')
metric_tmpl    = os.path.join(scripts_path, template_dir, 'metric_template.gp') # 1 graph per metric
stacked_tmpl   = os.path.join(scripts_path, template_dir, 'stacked_template.gp')


def create_file(name, content):
   # Write 'content' to the file 'name', overwriting it if it already exists
   f = open(name, 'w')
   f.write(content)
   f.close()

def is_numeric(s):
   try:
      float(s)
      return True
   except ValueError:
      return False

def get_x_y(nb_procs):
   # Return the mesh dimensions (x, y) of the platform running 'nb_procs'
   # processors, assuming 4 processors per cluster
   x = 1
   y = 1
   to_x = True
   while (x * y * 4 < nb_procs):
      if to_x:
         x = x * 2
      else:
         y = y * 2
      to_x = not to_x
   return x, y

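# Added sanity check (illustrative, not part of the original script): the mesh
# shapes implied by the 4-processors-per-cluster rule encoded in get_x_y() above
assert get_x_y(16) == (2, 2)
assert get_x_y(32) == (4, 2)
assert get_x_y(256) == (8, 8)
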
# We first fill the m_metric_id table (reverse mapping: counter tag -> metric)
for metric in all_metrics:
   for tag in all_tags:
      if m_metric_tag[metric] == tag:
         m_metric_id[tag] = metric
         break


# We start by processing all the log files
# Term files are processed for the execution time only
# Init (stdo) files are processed for all the other metrics
pattern = re.compile(r'\[0[0-9][0-9]\]') # counter tags look like "[0xy]"
exec_time = {}
metrics_val = {}
for prot in single_protocols:
   metrics_val[prot] = {}
   exec_time[prot] = {}
   for app in apps:
      exec_time[prot][app] = {}
      metrics_val[prot][app] = {}
      for i in nb_procs:
         metrics_val[prot][app][i] = {}
         log_stdo_file = os.path.join(scripts_path, data_dir, app + '_' + prot + log_stdo_name + str(i))
         log_term_file = os.path.join(scripts_path, data_dir, app + '_' + prot + log_term_name + str(i))

         # Term file: execution time
         lines = open(log_term_file, 'r')
         for line in lines:
            tokens = line[:-1].split()
            if len(tokens) > 0 and tokens[0] == "[PARALLEL_COMPUTE]":
               exec_time[prot][app][i] = int(tokens[-1])

         # Init (stdo) file: counter values
         lines = open(log_stdo_file, 'r')
         for line in lines:
            tokens = line[:-1].split()
            if len(tokens) == 0:
               continue
            tag = tokens[0]
            value = tokens[-1]
            if pattern.match(tag):
               metric = m_metric_id[tag]
               if (metric not in metrics_val[prot][app][i] or tag == "[000]" or tag == "[001]"):
                  # The cycle counters of the memory caches are not accumulated (they must all be equal)
                  metrics_val[prot][app][i][metric] = int(value)
               else:
                  metrics_val[prot][app][i][metric] += int(value)

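# Illustration (added, not part of the original script) of the line formats the
# two parsers above rely on; <...> stands for the actual values:
#
#   term file:  [PARALLEL_COMPUTE]   ...   <nb_cycles>    (last token = cycle count)
#   stdo file:  [0xy] <counter name> ...   <value>        (first token = tag, last token = value)
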
# Completing unset metrics (i.e. they are not present in the data file) with 0
for prot in single_protocols:
   for app in apps:
      for i in nb_procs:
         for metric in all_metrics:
            if metric not in metrics_val[prot][app][i]:
               metrics_val[prot][app][i][metric] = 0

# We make a 2nd pass to fill the derived fields, e.g. total_update
for prot in single_protocols:
   for app in apps:
      for i in nb_procs:
         x, y = get_x_y(i)
         metrics_val[prot][app][i]['total_read']      = metrics_val[prot][app][i]['local_read']     + metrics_val[prot][app][i]['remote_read']
         metrics_val[prot][app][i]['total_write']     = metrics_val[prot][app][i]['local_write']    + metrics_val[prot][app][i]['remote_write']
         metrics_val[prot][app][i]['total_ll']        = metrics_val[prot][app][i]['local_ll']       + metrics_val[prot][app][i]['remote_ll']
         metrics_val[prot][app][i]['total_sc']        = metrics_val[prot][app][i]['local_sc']       + metrics_val[prot][app][i]['remote_sc']
         metrics_val[prot][app][i]['total_cas']       = metrics_val[prot][app][i]['local_cas']      + metrics_val[prot][app][i]['remote_cas']
         metrics_val[prot][app][i]['total_update']    = metrics_val[prot][app][i]['local_update']   + metrics_val[prot][app][i]['remote_update']
         metrics_val[prot][app][i]['total_m_inv']     = metrics_val[prot][app][i]['local_m_inv']    + metrics_val[prot][app][i]['remote_m_inv']
         metrics_val[prot][app][i]['total_cleanup']   = metrics_val[prot][app][i]['local_cleanup']  + metrics_val[prot][app][i]['remote_cleanup']
         metrics_val[prot][app][i]['total_direct']    = metrics_val[prot][app][i]['total_read']     + metrics_val[prot][app][i]['total_write']
         metrics_val[prot][app][i]['total_ncc_to_cc'] = metrics_val[prot][app][i]['ncc_to_cc_read'] + metrics_val[prot][app][i]['ncc_to_cc_write']
         metrics_val[prot][app][i]['direct_cost']     = metrics_val[prot][app][i]['read_cost']      + metrics_val[prot][app][i]['write_cost']
         metrics_val[prot][app][i]['broadcast_cost']  = metrics_val[prot][app][i]['broadcast'] * (x * y - 1)
         if metrics_val[prot][app][i]['broadcast'] < metrics_val[prot][app][i]['write_broadcast']:
            # Workaround for a bug in mem_cache: clamp to 0 if broadcast < write_broadcast
            metrics_val[prot][app][i]['nonwrite_broadcast'] = 0
         else:
            metrics_val[prot][app][i]['nonwrite_broadcast'] = metrics_val[prot][app][i]['broadcast'] - metrics_val[prot][app][i]['write_broadcast']

         metrics_val[prot][app][i]['total_stacked'] = 0
         for stacked_metric in stacked_metrics:
            metrics_val[prot][app][i]['total_stacked'] += metrics_val[prot][app][i][stacked_metric]

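# Worked example (added, not part of the original script) of the broadcast_cost
# extrapolation above, which counts one coherence message per remote cluster:
#   x, y = get_x_y(64)             # -> (4, 4), i.e. 16 clusters
#   cost_per_broadcast = x * y - 1 # -> 15 messages per broadcast
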
print "mkdir -p", os.path.join(scripts_path, gen_dir)
subprocess.call([ 'mkdir', '-p', os.path.join(scripts_path, gen_dir) ])

print "mkdir -p", os.path.join(scripts_path, graph_dir)
subprocess.call([ 'mkdir', '-p', os.path.join(scripts_path, graph_dir) ])

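# A pure-Python alternative to shelling out to mkdir (sketch, not used by the
# script; os.makedirs raises OSError if the directory already exists, hence
# the isdir() check):
#
#   for d in (gen_dir, graph_dir):
#      path = os.path.join(scripts_path, d)
#      if not os.path.isdir(path):
#         os.makedirs(path)
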
############################################################
### Graph 1 : Coherence traffic Cost per application     ###
############################################################

for prot in single_protocols:
   for app in apps:
      data_coherence_name = os.path.join(scripts_path, gen_dir, prot + '_' + app + '_coherence.dat')
      gp_coherence_name   = os.path.join(scripts_path, gen_dir, prot + '_' + app + '_coherence.gp')

      # Creating the data file
      width = 15
      content = ""

      for metric in [ '#nb_procs' ] + grouped_metrics:
         content += metric + " "
         nb_spaces = width - len(metric)
         content += nb_spaces * ' '
      content += "\n"

      for i in nb_procs:
         content += "%-15d " % i
         for metric in grouped_metrics:
            val = float(metrics_val[prot][app][i][metric]) / exec_time[prot][app][i] * 1000
            content += "%-15f " % val
         content += "\n"

      create_file(data_coherence_name, content)

      # Creating the gp file
      template_file = open(coherence_tmpl, 'r')
      template = template_file.read()

      plot_str = ""
      col = 2
      for metric in grouped_metrics:
         if metric != grouped_metrics[0]:
            plot_str += ", \\\n    "
         plot_str += "\"" + data_coherence_name + "\" using ($1):($" + str(col) + ") lc rgb " + colors[col - 2] + " title \"" + m_metric_name[metric] + "\" with linespoint"
         col += 1
      gp_commands = template % dict(app_name = m_app_name[app], nb_procs = nb_procs[-1] + 1, plot_str = plot_str, svg_name = os.path.join(graph_dir, prot + '_' + app + '_coherence'))

      create_file(gp_coherence_name, gp_commands)

      # Calling gnuplot
      print "gnuplot", gp_coherence_name
      subprocess.call([ 'gnuplot', gp_coherence_name ])

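# Illustration (added, not part of the original script): the .dat file written
# above holds one row per processor count and one column per metric of
# grouped_metrics, each value scaled to events per 1000 cycles:
#
#   #nb_procs      <metric_1>       <metric_2>       ...
#   1              <events/kcycle>  <events/kcycle>  ...
#   4              <events/kcycle>  <events/kcycle>  ...
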

############################################################
### Graph 2 : Speedup per Application                    ###
############################################################

for prot in single_protocols:
   for app in apps:

      data_speedup_name = os.path.join(scripts_path, gen_dir, prot + '_' + app + '_speedup.dat')
      gp_speedup_name   = os.path.join(scripts_path, gen_dir, prot + '_' + app + '_speedup.gp')

      # Creating data file
      width = 15
      content = "#nb_procs"
      nb_spaces = width - len(content)
      content += nb_spaces * ' '
      content += "speedup\n"

      for i in nb_procs:
         content += "%-15d " % i
         val = exec_time[prot][app][i]
         content += "%-15f\n" % (exec_time[prot][app][1] / float(val))

      plot_str = "\"" + data_speedup_name + "\" using ($1):($2) lc rgb \"#654387\" title \"Speedup\" with linespoint"

      create_file(data_speedup_name, content)

      # Creating the gp file
      template_file = open(speedup_tmpl, 'r')
      template = template_file.read()

      gp_commands = template % dict(appli = m_app_name[app], nb_procs = nb_procs[-1] + 1, plot_str = plot_str, svg_name = os.path.join(graph_dir, prot + '_' + app + '_speedup'))

      create_file(gp_speedup_name, gp_commands)

      # Calling gnuplot
      print "gnuplot", gp_speedup_name
      subprocess.call([ 'gnuplot', gp_speedup_name ])

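# Added note (illustrative, not part of the original script): the speedup
# plotted above is computed against the single-processor run of the same
# protocol and application,
#   speedup(p) = exec_time[prot][app][1] / float(exec_time[prot][app][p])
# so the run with nb_procs == 1 must exist in the data directory.
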

############################################################
### Graph 3 : All speedups on the same Graph             ###
############################################################

for prot in single_protocols:
   # This graph uses the same template as Graph 2
   data_speedup_name = os.path.join(scripts_path, gen_dir, prot + '_all_speedup.dat')
   gp_speedup_name   = os.path.join(scripts_path, gen_dir, prot + '_all_speedup.gp')

   # Creating data file
   width = 15
   content = "#nb_procs"
   nb_spaces = width - len(content)
   content += (nb_spaces + 1) * ' '
   for app in apps:
      content += app + " "
      content += (width - len(app)) * " "
   content += "\n"

   for i in nb_procs:
      content += "%-15d " % i
      for app in apps:
         val = exec_time[prot][app][i]
         content += "%-15f " % (exec_time[prot][app][1] / float(val))
      content += "\n"

   create_file(data_speedup_name, content)

   # Creating gp file
   template_file = open(speedup_tmpl, 'r')
   template = template_file.read()

   plot_str = ""
   col = 2
   for app in apps:
      if app != apps[0]:
         plot_str += ", \\\n     "
      plot_str += "\"" + data_speedup_name + "\" using ($1):($" + str(col) + ") lc rgb %s title \"" % (colors[col - 2]) + m_app_name[app] + "\" with linespoint"
      col += 1

   gp_commands = template % dict(appli = "All Applications", nb_procs = nb_procs[-1] + 1, plot_str = plot_str, svg_name = os.path.join(graph_dir, prot + '_all_speedup'))

   create_file(gp_speedup_name, gp_commands)

   # Calling gnuplot
   print "gnuplot", gp_speedup_name
   subprocess.call([ 'gnuplot', gp_speedup_name ])


############################################################
### Graph 4 : Graph per metric                           ###
############################################################

# The following section creates the graphs grouped by measure (e.g. #broadcasts).
# The gnuplot template cannot easily stay generic if it contains everything, so
# most of the commands (xtics, application labels, plot lines) are generated here.
# One graph is created per metric of the "individual_metrics" list.

for prot in single_protocols:
   for metric in individual_metrics:
      data_metric_name = os.path.join(scripts_path, gen_dir, prot + '_' + metric + '.dat')
      gp_metric_name   = os.path.join(scripts_path, gen_dir, prot + '_' + metric + '.gp')

      # Creating the gp file
      # Setting xtics, i.e. the number of procs for each application
      xtics_str = "("
      first = True
      xpos = 1
      app_labels = ""
      for num_appli in range(0, len(apps)):
         for i in nb_procs:
            if not first:
               xtics_str += ", "
            first = False
            if i == nb_procs[0]:
               xpos_first = xpos
            xtics_str += "\"%d\" %.1f" % (i, xpos)
            xpos_last = xpos
            xpos += 1.5
         xpos += 0.5
         app_name_xpos = float((xpos_first + xpos_last)) / 2
         app_labels += "set label \"%s\" at first %f,character 1 center font \"Times,12\"\n" % (m_app_name[apps[num_appli]], app_name_xpos)
      xtics_str += ")"

      xmax_val = float(xpos - 1)

      # Writing the lines of "plot"
      plot_str = ""
      xpos = 0
      first = True
      column = 2
      for i in range(0, len(nb_procs)):
         if not first:
            plot_str += ", \\\n    "
         first = False
         plot_str += "\"%s\" using ($1+%.1f):($%d) lc rgb %s notitle with boxes" % (data_metric_name, xpos, column, colors[i])
         column += 1
         xpos += 1.5

      template_file = open(metric_tmpl, 'r')
      template = template_file.read()

      gp_commands = template % dict(xtics_str = xtics_str, app_labels = app_labels, ylabel_str = m_metric_name[metric], norm_factor_str = m_norm_factor_name[m_metric_norm[metric]], xmax_val = xmax_val, plot_str = plot_str, svg_name = os.path.join(graph_dir, prot + '_' + metric))

      create_file(gp_metric_name, gp_commands)

      # Creating the data file
      width = 15
      content = "#x_pos"
      nb_spaces = width - len(content)
      content += nb_spaces * ' '
      for i in nb_procs:
         content += "%-15d" % i
      content += "\n"

      x_pos = 1
      for app in apps:
         # Computation of x_pos
         content += "%-15f" % x_pos
         x_pos += len(nb_procs) * 1.5 + 0.5
         for i in nb_procs:
            if m_metric_norm[metric] == "N":
               content += "%-15d" % (metrics_val[prot][app][i][metric])
            elif m_metric_norm[metric] == "P":
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / i)
            elif m_metric_norm[metric] == "C":
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / exec_time[prot][app][i] * 1000)
            elif m_metric_norm[metric] == "W":
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / float(metrics_val[prot][app][i]['total_write'])) # Number of writes
            elif m_metric_norm[metric] == "R":
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / float(metrics_val[prot][app][i]['total_read'])) # Number of reads
            elif m_metric_norm[metric] == "D":
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / float(metrics_val[prot][app][i]['total_direct'])) # Number of req.
            elif is_numeric(m_metric_norm[metric]):
               content += "%-15f" % (float(metrics_val[prot][app][i][metric]) / float(metrics_val[prot][app][int(m_metric_norm[metric])][metric]))
            else:
               assert(False)

         app_name = m_app_name[app]
         content += "#" + app_name + "\n"

      create_file(data_metric_name, content)

      # Calling gnuplot
      print "gnuplot", gp_metric_name
      subprocess.call([ 'gnuplot', gp_metric_name ])

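# Added summary (illustrative, not part of the original script) of the
# m_metric_norm codes handled above:
#   "N" : raw count              "P" : divided by the number of processors
#   "C" : per 1000 cycles        "W" : per write         "R" : per read
#   "D" : per direct request     numeric "n" : relative to the same metric on n processors
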

############################################################
### Graph 5 : Stacked histogram with counters            ###
############################################################

# The following section creates a stacked histogram containing
# the metrics in the "stacked_metrics" list.
# It is normalized per application w.r.t. the values on 256 procs.

for prot in single_protocols:
   data_stacked_name = os.path.join(scripts_path, gen_dir, prot + '_stacked.dat')
   gp_stacked_name   = os.path.join(scripts_path, gen_dir, prot + '_stacked.gp')

   norm_factor_value = 256

   # Creating the gp file
   template_file = open(stacked_tmpl, 'r')
   template = template_file.read()

   xtics_str = "("
   first = True
   xpos = 1
   app_labels = ""
   for num_appli in range(0, len(apps)):
      for i in nb_procs:
         if not first:
            xtics_str += ", "
         first = False
         if i == nb_procs[0]:
            xpos_first = xpos
         xtics_str += "\"%d\" %d -1" % (i, xpos)
         xpos_last = xpos
         xpos += 1
      xpos += 1
      app_name_xpos = float((xpos_first + xpos_last)) / 2
      app_labels += "set label \"%s\" at first %f,character 1 center font \"Times,12\"\n" % (m_app_name[apps[num_appli]], app_name_xpos)
   xtics_str += ")"

   plot_str = "newhistogram \"\""
   n = 1
   for stacked_metric in stacked_metrics:
      plot_str += ", \\\n    " + "'" + data_stacked_name + "'" + " using " + str(n) + " lc rgb " + colors[n] + " title \"" + m_metric_name[stacked_metric] + "\""
      n += 1

   ylabel_str = "Breakdown of Coherence Traffic Normalized w.r.t. \\nthe Values on %d Processors" % norm_factor_value
   content = template % dict(svg_name = os.path.join(graph_dir, prot + '_stacked'), xtics_str = xtics_str, plot_str = plot_str, ylabel_str = ylabel_str, app_labels = app_labels, prot_labels = "")

   create_file(gp_stacked_name, content)

   # Creating the data file
   # Values are normalized per application, w.r.t. the total number of stacked events on norm_factor_value procs
   content = "#"
   for stacked_metric in stacked_metrics:
      content += stacked_metric
      content += ' ' + ' ' * (15 - len(stacked_metric))
   content += "\n"
   for app in apps:
      if app != apps[0]:
         for i in range(0, len(stacked_metrics)):
            content += "%-15f" % 0.0
         content += "\n"
      for i in nb_procs:
         for stacked_metric in stacked_metrics:
            content += "%-15f" % (float(metrics_val[prot][app][i][stacked_metric]) / metrics_val[prot][app][norm_factor_value]['total_stacked'])
         content += "\n"

   create_file(data_stacked_name, content)
   # Calling gnuplot
   print "gnuplot", gp_stacked_name
   subprocess.call([ 'gnuplot', gp_stacked_name ])

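# Added note (illustrative, not part of the original script): the rows of
# zeros written between applications act as empty separator bars in the
# stacked histogram, and the normalization above indexes
# metrics_val[prot][app][norm_factor_value], so the 256-processor run must be
# part of nb_procs.
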

#################################################################################
### Graph 6 : Stacked histogram with coherence cost compared to r/w cost      ###
#################################################################################

# The following section creates pairs of stacked histograms, normalized w.r.t. the first one.
# The first one contains the cost of reads and writes, the second one contains the cost
# of m_inv, m_up and broadcasts (extrapolated).

for prot in single_protocols:
   data_cost_filename = os.path.join(scripts_path, gen_dir, prot + '_relative_cost.dat')
   gp_cost_filename   = os.path.join(scripts_path, gen_dir, prot + '_relative_cost.gp')

   direct_cost_metrics = [ 'read_cost', 'write_cost' ]
   coherence_cost_metrics = ['update_cost', 'm_inv_cost', 'broadcast_cost' ]

   # Creating the gp file
   template_file = open(stacked_tmpl, 'r')
   template = template_file.read()

   xtics_str = "("
   first = True
   xpos = 1
   app_labels = ""
   for num_appli in range(0, len(apps)):
      first_proc = True
      for i in nb_procs:
         if i > 4:
            if not first:
               xtics_str += ", "
            first = False
            if first_proc:
               first_proc = False
               xpos_first = xpos
            xtics_str += "\"%d\" %f -1" % (i, float(xpos + 0.5))
            xpos_last = xpos
            xpos += 3
      app_name_xpos = float((xpos_first + xpos_last)) / 2
      app_labels += "set label \"%s\" at first %f,character 1 center font \"Times,12\"\n" % (m_app_name[apps[num_appli]], app_name_xpos)
      #xpos += 1
   xtics_str += ")"

   plot_str = "newhistogram \"\""
   n = 1
   for cost_metric in direct_cost_metrics + coherence_cost_metrics:
      plot_str += ", \\\n    " + "'" + data_cost_filename + "'" + " using " + str(n) + " lc rgb " + colors[n] + " title \"" + m_metric_name[cost_metric] + "\""
      n += 1

   ylabel_str = "Coherence Cost Compared to Direct Requests Cost,\\nNormalized per Application for each Number of Processors"
   content = template % dict(svg_name = os.path.join(graph_dir, prot + '_rel_cost'), xtics_str = xtics_str, plot_str = plot_str, ylabel_str = ylabel_str, app_labels = app_labels, prot_labels = "")

   create_file(gp_cost_filename, content)

   # Creating the data file
   # Values are normalized per application and per number of processors, w.r.t. the direct (read/write) request cost
   content = "#"
   for cost_metric in direct_cost_metrics:
      content += cost_metric
      content += ' ' + ' ' * (15 - len(cost_metric))
   for cost_metric in coherence_cost_metrics:
      content += cost_metric
      content += ' ' + ' ' * (15 - len(cost_metric))
   content += "\n"
   for app in apps:
      if app != apps[0]:
         for i in range(0, len(direct_cost_metrics) + len(coherence_cost_metrics)):
            content += "%-15f" % 0.0
         content += "\n"
      for i in nb_procs:
         if i > 4:
            for cost_metric in direct_cost_metrics:
               content += "%-15f" % (float(metrics_val[prot][app][i][cost_metric]) / metrics_val[prot][app][i]['direct_cost'])
            for cost_metric in coherence_cost_metrics:
               content += "%-15f" % 0.0
            content += "\n"
            for cost_metric in direct_cost_metrics:
               content += "%-15f" % 0.0
            for cost_metric in coherence_cost_metrics:
               content += "%-15f" % (float(metrics_val[prot][app][i][cost_metric]) / metrics_val[prot][app][i]['direct_cost'])
            content += "\n"
            if i != nb_procs[-1]:
               for j in range(0, len(direct_cost_metrics) + len(coherence_cost_metrics)):
                  content += "%-15f" % 0.0
               content += "\n"

   create_file(data_cost_filename, content)
   # Calling gnuplot
   print "gnuplot", gp_cost_filename
   subprocess.call([ 'gnuplot', gp_cost_filename ])

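# Illustration (added, not part of the original script) of the row pattern
# emitted above for each application and each processor count > 4: a first bar
# holding only the direct (read/write) costs and a second bar holding only the
# coherence costs, both normalized by the direct cost, followed by an empty
# separator bar:
#
#   read_cost  write_cost  update_cost  m_inv_cost  broadcast_cost
#   r          w           0            0           0               <- direct bar (r + w = 1)
#   0          0           u            m           b               <- coherence bar
#   0          0           0            0           0               <- separator
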
#################################################################################
### Joint graphs for several architectures                                    ###
#################################################################################

if len(joint_protocols) == 0:
   sys.exit()

#################################################################################
### Graph 7: Comparison of Speedups (normalized w.r.t. 1 proc on first arch)  ###
#################################################################################


for app in apps:

   data_speedup_name = os.path.join(scripts_path, gen_dir, 'joint_' + app + '_speedup.dat')
   gp_speedup_name   = os.path.join(scripts_path, gen_dir, 'joint_' + app + '_speedup.gp')

   # Creating data file
   width = 15
   content = "#nb_procs"
   nb_spaces = width - len(content)
   content += nb_spaces * ' '
   content += "speedup\n"

   for i in nb_procs:
      content += "%-15d " % i
      for prot in joint_protocols:
         val = exec_time[prot][app][i]
         content += "%-15f " % (exec_time[joint_protocols[0]][app][1] / float(val))
      content += "\n"

   create_file(data_speedup_name, content)

   # Creating the gp file
   template_file = open(speedup_tmpl, 'r')
   template = template_file.read()

   plot_str = ""
   col = 2
   for prot in joint_protocols:
      if prot != joint_protocols[0]:
         plot_str += ", \\\n     "
      plot_str += "\"" + data_speedup_name + "\" using ($1):($" + str(col) + ") lc rgb %s title \"" % (colors[col - 2]) + m_prot_name[prot] + "\" with linespoint"
      col += 1

   gp_commands = template % dict(appli = m_app_name[app] + " Normalized w.r.t. " + m_prot_name[joint_protocols[0]] + " on 1 Processor", nb_procs = nb_procs[-1] + 1, plot_str = plot_str, svg_name = os.path.join(graph_dir, 'joint_' + app + '_speedup'))

   create_file(gp_speedup_name, gp_commands)

   # Calling gnuplot
   print "gnuplot", gp_speedup_name
   subprocess.call([ 'gnuplot', gp_speedup_name ])


#################################################################################
### Graph 8 : Joint Stacked histogram with coherence cost and r/w cost        ###
#################################################################################

# The following section creates, for each application and each number of processors,
# a pair of stacked histograms per architecture (one bar for the R/W cost, one bar
# for the coherence cost), all normalized w.r.t. the direct cost of the first
# architecture for that (application, number of processors). It is close to Graph 6.

data_cost_filename = os.path.join(scripts_path, gen_dir, 'joint_relative_cost.dat')
gp_cost_filename   = os.path.join(scripts_path, gen_dir, 'joint_relative_cost.gp')

direct_cost_metrics = [ 'read_cost', 'write_cost' ]
coherence_cost_metrics = ['update_cost', 'm_inv_cost', 'broadcast_cost' ]

# Creating the gp file
template_file = open(stacked_tmpl, 'r')
template = template_file.read()

xtics_str = "("
first = True
xpos = 1 # successive x positions of the center of the first bar of each application
app_labels = ""
prot_labels = ""
for num_appli in range(0, len(apps)):
   first_proc = True
   for i in nb_procs:
      if i > 4:
         x = 0 # local var for computing the position of the protocol names
         for prot in joint_protocols:
            prot_labels += "set label \"%s\" at first %f, character 2 center font \"Times,10\"\n" % (m_prot_name[prot], float((xpos - 0.5)) + x) # -0.5 instead of +0.5, don't know why... (gnuplot bug?)
            x += 2

         if not first:
            xtics_str += ", "
         first = False
         if first_proc:
            first_proc = False
            xpos_first = xpos
         xtics_str += "\"%d\" %f -1" % (i, float(xpos - 0.5 + len(joint_protocols)))
         xpos_last = xpos
         xpos += 1 + len(joint_protocols) * 2
   app_name_xpos = float((xpos_first + xpos_last)) / 2
   app_labels += "set label \"%s\" at first %f,character 1 center font \"Times,12\"\n" % (m_app_name[apps[num_appli]], app_name_xpos)
   xpos += 1
xtics_str += ")"

plot_str = "newhistogram \"\""
n = 1
for cost_metric in direct_cost_metrics + coherence_cost_metrics:
   plot_str += ", \\\n    " + "'" + data_cost_filename + "'" + " using " + str(n) + " lc rgb " + colors[n] + " title \"" + m_metric_name[cost_metric] + "\""
   n += 1

ylabel_str = "Coherence Cost and Direct Requests Cost,\\nNormalized per Application for each Number of Processors"
content = template % dict(svg_name = os.path.join(graph_dir, 'joint_rel_cost'), xtics_str = xtics_str, plot_str = plot_str, ylabel_str = ylabel_str, app_labels = app_labels, prot_labels = prot_labels)

create_file(gp_cost_filename, content)

# Creating the data file
# Values are normalized per application and per number of processors, w.r.t. the direct cost of the first protocol
content = "#"
for cost_metric in direct_cost_metrics:
   content += cost_metric
   content += ' ' + ' ' * (15 - len(cost_metric))
for cost_metric in coherence_cost_metrics:
   content += cost_metric
   content += ' ' + ' ' * (15 - len(cost_metric))
content += "\n"
for app in apps:
   if app != apps[0]:
      for j in range(0, len(direct_cost_metrics) + len(coherence_cost_metrics)):
         content += "%-15f" % 0.0
      content += "\n"
   for i in nb_procs:
      if i > 4:
         for prot in joint_protocols:
            for cost_metric in direct_cost_metrics:
               content += "%-15f" % (float(metrics_val[prot][app][i][cost_metric]) / metrics_val[joint_protocols[0]][app][i]['direct_cost'])
            for cost_metric in coherence_cost_metrics:
               content += "%-15f" % 0.0
            content += "\n"
            for cost_metric in direct_cost_metrics:
               content += "%-15f" % 0.0
            for cost_metric in coherence_cost_metrics:
               content += "%-15f" % (float(metrics_val[prot][app][i][cost_metric]) / metrics_val[joint_protocols[0]][app][i]['direct_cost'])
            content += "\n"
         if i != nb_procs[-1]:
            for j in range(0, len(direct_cost_metrics) + len(coherence_cost_metrics)):
               content += "%-15f" % 0.0
            content += "\n"

create_file(data_cost_filename, content)
# Calling gnuplot
print "gnuplot", gp_cost_filename
subprocess.call([ 'gnuplot', gp_cost_filename ])