Changeset 65 for sources/src


Ignore:
Timestamp:
Oct 23, 2019, 12:53:07 PM (5 years ago)
Author:
bouyer
Message:

Various performance improvements for the parallel systemcass: cache-aligned
data structures, write only when needed, disable some unneeded barriers.

Fix a bug in the non-OpenMP case: a pointer was not initialized.

Various style updates.

Location:
sources/src
Files:
12 edited

Legend:

Unmodified
Added
Removed
  • sources/src/casc.h

    r63 r65  
    2323
    2424EXTERN char unstable;
    25 EXTERN int32_t * pending_write_vector_nb;
     25EXTERN int32_t *pending_write_vector_nb;
    2626#ifdef _OPENMP
    2727#pragma omp threadprivate (pending_write_vector_nb)
  • sources/src/entity.cc

    r60 r65  
    395395    equi_list_t::iterator i;
    396396    int index = 0;
    397     for (i = equi_list.begin(); i != equi_list.end(); ++i) {
    398         sc_interface *out = get_out_port(*i);
    399         if (out) {
    400             bind_equi_to_table(*i, out->get_pointer());
    401         }
    402         else {
    403             sc_interface * reg = get_signal(*i);
    404             if (reg == NULL) {
    405                 reg = get_localvar(*i);
    406             }
    407             if (reg) {
    408                 bind_equi_to_table(*i, reg->get_pointer());
    409             }
    410             else {
    411                 bind_equi_to_table(*i, &(equi_table[index]));
    412                 index += (i->begin()->data_size_in_bytes() - 1) / sizeof(tab_t) + 1;
    413             }
    414         }
    415 #if 0
    416         sc_interface *reg = get_signal (*i);
    417         sc_interface *out = get_out_port (*i);
    418         if (reg) {
    419             std::cerr << "binding " << *i << " to reg "
    420                 << reg << std::endl;
    421             bind_equi_to_table (*i, reg->get_pointer ());
    422         } else if (out) {
    423             std::cerr << "binding " << *i << " to out "
    424                 << out << std::endl;
    425             bind_equi_to_table (*i, out->get_pointer ());
    426         } else {
    427             reg = get_localvar (*i);
    428             if (reg) {
    429                 std::cerr << "binding " << *i << " to localvar "
    430                     << reg << std::endl;
    431                 bind_equi_to_table (*i, reg->get_pointer ());
    432             } else {
    433                 std::cerr << "binding " << *i << " to index "
    434                     << index << std::endl;
    435                 bind_equi_to_table (*i, &(equi_table[index]));
    436                 index += (i->begin()->data_size_in_bytes() - 1) / sizeof(tab_t) + 1;
    437             }
    438         }
     397    for (i = equi_list.begin (); i != equi_list.end (); ++i)            {
     398#if 1
     399                        sc_interface *out = get_out_port (*i);
     400                        if (out) {
     401                                bind_equi_to_table (*i, out->get_pointer ());
     402                        } else {
     403                                sc_interface *reg = get_signal (*i);
     404                                if (reg == NULL)
     405          reg = get_localvar (*i);
     406                                if (reg) {
     407          bind_equi_to_table (*i, reg->get_pointer ());
     408                                } else {
     409                bind_equi_to_table (*i, &(equi_table[index]));
     410                                  index += (i->begin()->data_size_in_bytes() - 1) / sizeof(tab_t) + 1;
     411                                }
     412                        }
     413#else
     414                        sc_interface *reg = get_signal (*i);
     415                        sc_interface *out = get_out_port (*i);
     416                        if (reg) {
     417                                std::cerr << "binding " << *i << " to reg "
     418                                    << reg << std::endl;
     419                                  bind_equi_to_table (*i, reg->get_pointer ());
     420                        } else if (out) {
     421                                std::cerr << "binding " << *i << " to out "
     422                                    << out << std::endl;
     423                                bind_equi_to_table (*i, out->get_pointer ());
     424                        } else {
     425                                reg = get_localvar (*i);
     426                                if (reg) {
     427                                        std::cerr << "binding " << *i << " to localvar "
     428                                            << reg << std::endl;
     429                                          bind_equi_to_table (*i, reg->get_pointer ());
     430                                } else {
     431                                        std::cerr << "binding " << *i << " to index "
     432                                            << index << std::endl;
     433                                        bind_equi_to_table (*i, &(equi_table[index]));
     434                                        index += (i->begin()->data_size_in_bytes() - 1) / sizeof(tab_t) + 1;
     435                                }
     436                        }
    439437#endif
    440438    }
  • sources/src/gen_code.cc

    r63 r65  
    5050#include <fstream>
    5151#ifdef _OPENMP
    52     #include <omp.h>
    53 #endif
     52#include <omp.h>
     53#endif
     54#include <sys/types.h>
     55#include <unistd.h>
    5456
    5557#include "internal.h"
     
    240242}
    241243
    242 
    243 char * gen_scheduling_code_for_dynamic_link(method_process_list_t & transition_func_list,
    244         method_process_list_t & moore_func_list,
    245         strong_component_list_t * strongcomponents) {
    246     if (dump_stage) {
    247         cerr << "Generating C code for scheduling...\n";
    248     }
     244char *
     245gen_scheduling_code_for_dynamic_link (
     246               method_process_list_t           &transition_func_list,
     247               method_process_list_t           &moore_func_list,
     248               strong_component_list_t         *strongcomponents)
     249{
     250  if (dump_stage)
     251    cerr << "Generating C code for scheduling...\n";
    249252
    250253    // open temporary file
     
    258261    }
    259262
    260     o << "// generated by " << sc_version() << endl
    261         << "#include <casc.h>\n\n" << "#include <cstdio>\n\n"
    262         //  << "#include <iostream>\n\n"
    263         << "namespace sc_core {\n"
    264         << " typedef void (sc_module::*SC_ENTRY_FUNC)();\n"
    265         << " typedef void (*CASC_ENTRY_FUNC)(void *);\n";
     263  o << "// generated by " << sc_version () << endl
     264    << "#include <casc.h>\n\n"
     265    << "#include <cstdio>\n\n"
     266//  << "#include <iostream>\n\n"
     267    << "namespace sc_core {\n"
     268    << " typedef void (sc_module::*SC_ENTRY_FUNC)();\n"
     269    << " typedef void (*CASC_ENTRY_FUNC)(void *);\n";
    266270
    267271    const char * pmf_type = get_pmf_type();
     
    533537}
    534538
    535 
    536 void call_functions_in_parallel(function_call & fc) {
    537     int n = fc.func_number;
    538     int i;
    539     for (i = 0; i < n; ++i) {
    540 #if 0
    541         //defined(CONFIG_DEBUG)
    542         sc_module *m = (sc_module *) (fc.instance[i]);
    543         cerr << m->name() << endl;
    544         cerr << "thread #" << omp_get_thread_num() << endl;
     539void
     540call_functions_in_parallel (function_call &fc)
     541{
     542  int n = fc.func_number;
     543  int i;
     544  for (i = 0; i < n; ++i)
     545  {
     546#if 0 //defined(CONFIG_DEBUG)
     547    sc_module *m = (sc_module*)(fc.instance[i]);
     548    cerr << m->name () << endl;
     549    cerr << "thread #" << omp_get_thread_num () << endl;
    545550#endif
    546551        fc.function[i].pf(fc.instance[i]);
     
    566571 */
    567572
    568 unsigned int nb_func[2];
    569 static method_process_t **func_list[2];
     573unsigned int              nb_func[2];
     574static method_process_t        **func_list[2];
    570575static strong_component_list_t quasistatic_list;
    571576
     
    578583#endif
    579584
    580 static void Call(const method_process_t & m) {
    581     sc_module * mod = m.module;
    582     SC_ENTRY_FUNC func = m.func;
    583     //  CASC_ENTRY_FUNC   func = reinterpret_cast<CASC_ENTRY_FUNC> (m.func);
    584     // QM
    585     //cerr << "Exec " << mod->name() << "->" << m.name << endl;
    586     (mod->*func) ();
     585static
     586void
     587Call         (const method_process_t &m)
     588{
     589  sc_module        *mod  = m.module;
     590  SC_ENTRY_FUNC     func = m.func;
     591//  CASC_ENTRY_FUNC   func = reinterpret_cast<CASC_ENTRY_FUNC> (m.func);
     592  (mod->*func) ();
    587593}
    588594
     
    620626unsigned int num_omp_threads;
    621627
    622 void quasistatic_simulate_1_cycle(void) {
    623     int i;
    624 
    625     for (i = 0; i < nb_func[0]; ++i) {
    626         Call(*(func_list[0][i]));
    627     }
     628void quasistatic_simulate_1_cycle (void)
     629{
     630        int i;
     631
     632        for (i = 0; i < nb_func[0]; ++i)
     633                Call (*(func_list[0][i]));
     634#ifdef _OPENMP
    628635#define USE_BUSY_WAIT 1
    629     update();
     636#if 0
    630637#if USE_BUSY_WAIT
    631     expected_globaltime += num_omp_threads;
    632     if (__sync_add_and_fetch(&globaltime, 1) == expected_globaltime) {
    633         last_wait_up++;
    634     }
    635     __asm volatile("mfence");
    636     while (globaltime < expected_globaltime) {
    637         busy_wait_up++;
    638         __asm volatile("lfence");
    639     }
     638        expected_globaltime += num_omp_threads;
     639        if (__sync_add_and_fetch(&globaltime, 1) == expected_globaltime)
     640                last_wait_f0++;
     641        __asm volatile("mfence");
     642        while (globaltime < expected_globaltime) {
     643                busy_wait_f0++;
     644                __asm volatile("lfence");
     645        }
    640646#else
    641647    #ifdef _OPENMP
     
    643649    #endif
    644650#endif
    645 
    646     for (i = 0; i < nb_func[1]; ++i) {
    647         Call(*(func_list[1][i]));
    648     }
    649 
     651#endif
     652#endif // _OPENMP
     653
     654        update();
     655
     656#ifdef _OPENMP
    650657#if USE_BUSY_WAIT
    651     expected_globaltime += num_omp_threads;
    652     if (__sync_add_and_fetch(&globaltime, 1) == expected_globaltime) {
    653         last_wait_f1++;
    654     }
    655     __asm volatile("mfence");
    656     while (globaltime < expected_globaltime) {
    657         busy_wait_f1++;
    658         __asm volatile("lfence");
    659     }
     658        expected_globaltime += num_omp_threads;
     659        if (__sync_add_and_fetch(&globaltime, 1) == expected_globaltime)
     660                last_wait_up++;
     661        __asm volatile("mfence");
     662        while (globaltime < expected_globaltime) {
     663                busy_wait_up++;
     664                __asm volatile("lfence");
     665        }
     666
    660667#else
    661668    #ifdef _OPENMP
     
    663670    #endif
    664671#endif
    665     if (!quasistatic_list.empty()) {
     672#endif // _OPENMP
     673
     674        for (i = 0; i < nb_func[1]; ++i)
     675                Call (*(func_list[1][i]));
     676
    666677#ifdef _OPENMP
     678#if USE_BUSY_WAIT
     679        expected_globaltime += num_omp_threads;
     680        if (__sync_add_and_fetch(&globaltime, 1) == expected_globaltime)
     681                last_wait_f1++;
     682        __asm volatile("mfence");
     683        while (globaltime < expected_globaltime) {
     684                busy_wait_f1++;
     685                __asm volatile("lfence");
     686        }
     687#else
     688#pragma omp barrier
     689#endif
     690#endif // _OPENMP
     691
     692        if (!quasistatic_list.empty()) {
    667693#pragma omp master
    668 #endif
    669         {
    670             quasistatic_mealy_generation();
    671         }
     694                {
     695                quasistatic_mealy_generation ();
     696                }
     697#ifdef _OPENMP
    672698#if USE_BUSY_WAIT
    673         expected_globaltime += num_omp_threads;
    674         if (__sync_add_and_fetch(&globaltime, 1) == expected_globaltime) {
    675             last_wait_ml++;
    676         }
    677         __asm volatile("mfence");
    678         while (globaltime < expected_globaltime) {
    679             busy_wait_ml++;
    680             __asm volatile("lfence");
    681         }
     699                expected_globaltime += num_omp_threads;
     700                if (__sync_add_and_fetch(&globaltime, 1) == expected_globaltime)
     701                        last_wait_ml++;
     702                __asm volatile("mfence");
     703                while (globaltime < expected_globaltime) {
     704                        busy_wait_ml++;
     705                        __asm volatile("lfence");
     706                }
    682707#else
    683708    #ifdef _OPENMP
     
    685710    #endif
    686711#endif
    687     }
    688 }
    689 
    690 
    691 void gen_scheduling_code_for_quasistatic_func(method_process_list_t & transition_func_list,
    692         method_process_list_t & moore_func_list,
    693         strong_component_list_t * mealy_func_list) {
    694     if (dump_stage) {
    695         cerr << "Generating quasi static scheduling...\n";
    696     }
    697 
    698     nb_func[0] = transition_func_list.size();
    699     nb_func[1] = moore_func_list.size();
    700 
    701     func_list[0] = (method_process_t**) malloc(sizeof (method_process_t*) * nb_func[0]);
    702     func_list[1] = (method_process_t**) malloc(sizeof (method_process_t*) * nb_func[1]);
    703 
    704     unsigned int i;
    705     for (i = 0; i < nb_func[0]; ++i) {
    706         func_list[0][i] = (transition_func_list[i]);
    707     }
    708 
    709     for (i = 0; i < nb_func[1]; ++i) {
    710         func_list[1][i] = (moore_func_list[i]);
    711     }
    712 
    713     if (mealy_func_list != NULL) {
    714         quasistatic_list = *mealy_func_list;
    715     }
    716 
    717     if (dump_stage) {
    718         cerr << "Generating quasi static scheduling done.\n";
    719     }
     712#endif // _OPENMP
     713        }
     714
     715}
     716
     717void
     718gen_scheduling_code_for_quasistatic_func (
     719            method_process_list_t   &transition_func_list,
     720            method_process_list_t   &moore_func_list,
     721            strong_component_list_t *mealy_func_list)
     722{
     723  if (dump_stage)
     724    cerr << "Generating quasi static scheduling...\n";
     725
     726  nb_func[0] = transition_func_list.size();
     727  nb_func[1] = moore_func_list     .size();
     728
     729  func_list[0] = (method_process_t**) malloc (sizeof (method_process_t*) * nb_func[0]);
     730  func_list[1] = (method_process_t**) malloc (sizeof (method_process_t*) * nb_func[1]);
     731
     732  unsigned int i;
     733  for (i = 0; i < nb_func[0]; ++i)
     734    func_list[0][i] = (transition_func_list[i]);
     735
     736  for (i = 0; i < nb_func[1]; ++i)
     737    func_list[1][i] = (moore_func_list[i]);
     738
     739  if (mealy_func_list != NULL)
     740    quasistatic_list = *mealy_func_list;
     741
     742  if (dump_stage)
     743    cerr << "Generating quasi static scheduling done.\n";
    720744}
    721745} // end of sc_core namespace
  • sources/src/gen_code.h

    r63 r65  
    4242/* generate a scheduling code */
    4343extern void  gen_scheduling_code_for_quasistatic_func(
    44       method_process_list_t   &transition_list,
    45       method_process_list_t   &moore_list,
    46       strong_component_list_t *mealy_list);
    47 
     44                      method_process_list_t   &transition_list,
     45                      method_process_list_t   &moore_list,
     46                            strong_component_list_t *mealy_list);
    4847extern void  gen_scheduling_code_for_static_func(
    49       method_process_list_t   &transition_list,
    50       method_process_list_t   &moore_list,
    51       ProcessDependencyList   &mealy_list);
     48                      method_process_list_t   &transition_list,
     49                      method_process_list_t   &moore_list,
     50                            ProcessDependencyList   &mealy_list);
     51extern char *gen_scheduling_code_for_dynamic_link(
     52                      method_process_list_t   &transition_list,
     53                      method_process_list_t   &moore_list,
     54                            ProcessDependencyList   &mealy_list);
     55extern char *gen_scheduling_code_for_dynamic_link(
     56                      method_process_list_t   &transition_list,
     57                      method_process_list_t   &moore_list,
     58                            strong_component_list_t *strongcomponents);
    5259
    5360extern char * gen_scheduling_code_for_dynamic_link(
     
    8794#pragma omp master
    8895    {
    89         std::cerr << "begin of cycle #" << sc_simulation_time() << "\n";
    90     }
    91 #endif
    92 
    93         func_simulate_1_cycle();
     96        std::cerr << "begin of cycle #" << sc_simulation_time () << "\n";
     97    }
     98#endif
     99
     100        func_simulate_1_cycle (); 
    94101
    95102        ++nb_cycles;
     
    97104#pragma omp master
    98105    {
    99         std::cerr << "end of cycle\n";
     106        std::cerr << "end of cycle\n";
    100107    }
    101108#endif
    102109}
    103110
    104 inline void internal_sc_cycle1(int number_of_cycles) { 
    105     extern unsigned long long busy_wait_f0, busy_wait_f1, busy_wait_up, busy_wait_ml;
    106     extern unsigned long long last_wait_f0, last_wait_f1, last_wait_up, last_wait_ml;
    107     extern unsigned int nb_func[2];
    108     extern unsigned int expected_globaltime;
    109     extern volatile unsigned int globaltime;
     111inline void
     112internal_sc_cycle1 (int number_of_cycles)
     113
     114
     115extern unsigned long long busy_wait_f0, busy_wait_f1, busy_wait_up, busy_wait_ml;
     116extern unsigned long long last_wait_f0, last_wait_f1, last_wait_up, last_wait_ml;
     117extern unsigned int nb_func[2];
    110118#ifdef _OPENMP
    111119#pragma omp threadprivate (busy_wait_f0, busy_wait_f1, busy_wait_up, busy_wait_ml, nb_func)
    112120#pragma omp threadprivate (last_wait_f0, last_wait_f1, last_wait_up, last_wait_ml)
     121#endif
     122extern unsigned int expected_globaltime;
     123extern volatile unsigned int globaltime;
     124#ifdef _OPENMP
    113125#pragma omp threadprivate (expected_globaltime)
    114126#pragma omp shared (globaltime)
    115127#endif
    116     extern unsigned int num_omp_threads;
     128
     129extern unsigned int num_omp_threads;
    117130
    118131
     
    120133#pragma omp parallel
    121134#endif
    122     {
    123         int cyclecount = number_of_cycles;
    124         busy_wait_f0 = busy_wait_f1 = busy_wait_up = busy_wait_ml = total_assig = 0;
    125         last_wait_f0 = last_wait_f1 = last_wait_up = last_wait_ml = 0;
    126 
    127         expected_globaltime = 0;
    128 #ifdef _OPENMP
    129 #pragma omp master
    130 #endif
    131         {
    132             globaltime = 0;
    133 #ifdef _OPENMP
    134             num_omp_threads = omp_get_num_threads();
     135  {
     136        int cyclecount = number_of_cycles;
     137        busy_wait_f0 = busy_wait_f1 = busy_wait_up = busy_wait_ml = total_assig = 0;
     138        last_wait_f0 = last_wait_f1 = last_wait_up = last_wait_ml = 0;
     139
     140        expected_globaltime = 0;
     141#ifdef _OPENMP
     142#pragma omp master
     143#endif
     144    {
     145        globaltime = 0;
     146#ifdef _OPENMP
     147  num_omp_threads = omp_get_num_threads();
    135148#else
    136             num_omp_threads = 1;
    137 #endif
    138         }
     149  num_omp_threads = 1;
     150#endif
     151    }
    139152
    140153#ifdef _OPENMP
    141154#pragma omp barrier
    142155#endif
    143         while (!(have_to_stop || cyclecount == 0)) {
    144         //while (!(have_to_stop || number_of_cycles == 0)) {
    145 #ifdef _OPENMP
    146 #pragma omp master
    147 #endif
    148             {
    149                 trace_all(false);
    150             }
    151             internal_sc_cycle2();
    152 #ifdef _OPENMP
    153 #pragma omp master
    154 #endif
    155             {
    156                 trace_all(true);
    157             }
    158             cyclecount = (number_of_cycles < 0) ? number_of_cycles : cyclecount - 1;
    159             // number_of_cycles = (number_of_cycles < 0) ? number_of_cycles : number_of_cycles - 1;
    160         }
     156        while (!((have_to_stop) | (cyclecount == 0))) {
     157#ifdef _OPENMP
     158#pragma omp master
     159#endif
     160            {
     161                trace_all  (false);
     162            }
     163                internal_sc_cycle2 ();
     164#ifdef _OPENMP
     165#pragma omp master
     166#endif
     167            {
     168                trace_all  (true);
     169            }
     170            cyclecount = (number_of_cycles<0)?number_of_cycles:cyclecount-1;
     171        }
    161172#ifdef _OPENMP
    162173#pragma omp barrier
    163 #endif
    164 #if 0
    165 #ifdef _OPENMP
    166174#pragma omp critical
    167         {
    168             std::cerr << "Thread " << omp_get_thread_num() << " busy_wait " <<
    169                 busy_wait_f0 << " " << busy_wait_up << " " <<
    170                 busy_wait_f1 << " " << busy_wait_ml << std::endl;
    171         }
     175    {
     176        std::cerr << "Thread " << omp_get_thread_num() << " busy_wait " <<
     177            busy_wait_f0 << " " << busy_wait_up << " " <<
     178            busy_wait_f1 << " " << busy_wait_ml << std::endl;
     179    }
    172180#pragma omp critical
    173         {
    174             std::cerr << "Thread " << omp_get_thread_num() << " last_wait " <<
    175                 last_wait_f0 << " " << last_wait_up << " " <<
    176                 last_wait_f1 << " " << last_wait_ml << std::endl;
    177         }
     181    {
     182        std::cerr << "Thread " << omp_get_thread_num() << " last_wait " <<
     183            last_wait_f0 << " " << last_wait_up << " " <<
     184            last_wait_f1 << " " << last_wait_ml << std::endl;
     185    }
    178186#pragma omp critical
    179         {
    180             std::cerr << "Thread " << omp_get_thread_num() << " nfuncs "
    181                 << nb_func[0] << " " << nb_func[1] << " total_assig " <<
    182                 total_assig << std::endl;
    183         }
    184 #endif
    185 #endif
    186     }
     187    {
     188        std::cerr << "Thread " << omp_get_thread_num() << " nfuncs "
     189          << nb_func[0] << " " << nb_func[1] << " total_assig " <<
     190           total_assig << std::endl;
     191    }
     192#endif // _OPENMP
     193  }
    187194}
    188195
     
    206213#ifdef _OPENMP
    207214#pragma omp parallel
    208 #endif
    209         update();
    210         func_combinationals();
    211     }
    212 
    213     internal_sc_cycle1((int) duration);
    214 
    215     // don't need to do func_combinationals since 'unstable' flag is now false
    216     if (is_posted_write()) {
     215#endif // _OPENMP
     216        update ();
     217    func_combinationals ();
     218  }
     219       
     220        internal_sc_cycle1 ((int)duration);
     221
     222  // don't need to do func_combinationals since 'unstable' flag is now false
     223  if (is_posted_write ()) {
    217224#ifdef _OPENMP
    218225#pragma omp parallel
    219 #endif
    220         update();
    221         func_combinationals();
    222     }
     226#endif // _OPENMP
     227      update ();
     228    func_combinationals ();
     229  }
    223230}
    224231
  • sources/src/global_functions.cc

    r63 r65  
    232232    }
    233233
    234     /*
    235      * Initialize the signals table
    236      */
    237     create_signals_table();
    238     bind_to_table();
    239     if (dump_netlist_info) {
    240         print_table(cerr);
    241         cerr << endl;
    242         print_table_stats(cerr);
    243         cerr << endl;
    244     }
    245     // Init variables to be able to run combinational functions
    246     pending_write_vector_capacity = get_signal_table_size();
    247 
    248     assert(pending_write_vector_capacity != 0);
     234  /*
     235   * Initialize the signals table
     236   */
     237  create_signals_table ();
     238  bind_to_table ();
     239  if (dump_netlist_info)
     240  {
     241    print_table (cerr);
     242    cerr << endl;
     243    print_table_stats (cerr);
     244    cerr << endl;
     245  }
     246
     247  // Init variables to be able to run combinational functions
     248  pending_write_vector_capacity = get_signal_table_size ();
     249
     250  assert(pending_write_vector_capacity != 0);
    249251
    250252#ifdef _OPENMP
    251     #define LINE_SIZE 128L
    252     int malloc_size = (sizeof (pending_write_t) * (pending_write_vector_capacity + 1) + (LINE_SIZE - 1)) & ~(LINE_SIZE - 1);
    253     assert((sizeof(pending_write_t) * (pending_write_vector_capacity + 1)) <= malloc_size && "bad allocation size");
    254 
    255     #pragma omp parallel
    256     {
    257         posix_memalign((void **) &pending_write_vector, LINE_SIZE, malloc_size);
    258         pending_write_vector_nb = (int32_t *) &pending_write_vector[0];
    259         pending_write_vector = &pending_write_vector[1];
    260         //printf("malloc 0x%x @%p, idx @0x%x\n", malloc_size, pending_write_vector, pending_write_vector_nb);
    261         *pending_write_vector_nb = 0;
    262     }
     253#define LINE_SIZE 128L
     254  int malloc_size =
     255      (sizeof (pending_write_t) * (pending_write_vector_capacity + 1) + (LINE_SIZE - 1)) & ~(LINE_SIZE - 1);
     256  assert((sizeof (pending_write_t) * (pending_write_vector_capacity + 1)) <= malloc_size && "bad allocation size");
     257
     258#pragma omp parallel
     259  {
     260          posix_memalign((void **)&pending_write_vector, LINE_SIZE, malloc_size);
     261          pending_write_vector_nb = (int32_t *)&pending_write_vector[0];
     262          pending_write_vector = &pending_write_vector[1];
     263          printf("malloc 0x%x @%p, idx @0x%x\n", malloc_size, pending_write_vector, pending_write_vector_nb);
     264          *pending_write_vector_nb = 0;
     265  }
    263266#else
    264     pending_write_vector = (pending_write_vector_t) malloc(sizeof(pending_write_t) * pending_write_vector_capacity);
    265     pending_write_vector_nb = (int32_t *) malloc(sizeof(int32_t));
    266     *pending_write_vector_nb = 0;
    267 #endif
    268 
    269 
    270     // create the clock list
    271     clock_list_t clock_list;
    272     create_clock_list(clock_list, get_equi_list());
    273     if (dump_netlist_info) {
    274         cerr << "Clock list\n" << "----------\n" << clock_list << "\n\n";
    275     }
    276 
    277     // Check if a clock exists in the system
    278     if (clock_list.empty()) {
    279         cerr << "System need a clock.\n" <<
    280             "Please define system clock using special type \"sc_clock\".\n";
    281         exit(22);
    282     }
    283     // Check if any constructor wrote into registers
    284     if (*pending_write_vector_nb != 0) {
    285         cerr <<
    286             "Error : Register/Signal writing is not allowed before sc_initialize.\n"
    287             "Move initializations from constructors/sc_main to module reset sequences.\n";
    288         // we are unable to dump register(s) name(s)
    289         // because the table binding is not yet completed.
    290         exit(24);
    291     }
    292 
    293     string base_name = get_scheduling(scheduling_method);
    294 
    295     if (dynamic_link_of_scheduling_code) {
    296         compile_and_link(base_name.c_str());
    297     }
    298     else {
    299         use_static_func();
    300     }
    301 
    302     *pending_write_vector_nb = 0;
    303 
    304     check_all_ports();
    305     usage.start();
     267        pending_write_vector_nb = (int32_t *)malloc(sizeof(int32_t));
     268        pending_write_vector = (pending_write_vector_t)
     269            malloc(sizeof (pending_write_t) * pending_write_vector_capacity);
     270#endif
     271 
     272  // create the clock list
     273  clock_list_t clock_list;
     274  create_clock_list (clock_list, get_equi_list ());
     275  if (dump_netlist_info)
     276    cerr << "Clock list\n"
     277         << "----------\n"
     278         << clock_list << "\n\n";
     279  // Check if a clock exists in the system
     280  if (clock_list.empty()) {
     281      cerr << "System need a clock.\n" <<
     282          "Please define system clock using special type \"sc_clock\".\n";
     283      exit(22);
     284  }
     285
     286  string base_name = get_scheduling (scheduling_method);
     287
     288  if (dynamic_link_of_scheduling_code)
     289    compile_and_link (base_name.c_str());
     290  else
     291    use_static_func ();
     292 
     293  *pending_write_vector_nb = 0;
     294 
     295  check_all_ports ();
     296  usage.start ();
    306297
    307298    if (dump_stage) {
  • sources/src/internal.h

    r59 r65  
    6060extern int scheduling_method;
    6161extern bool use_port_dependency;
    62 extern bool use_openmp;
     62extern const bool use_openmp;
    6363
    6464#define NO_SCHEDULING       0
  • sources/src/sc_interface.h

    r52 r65  
    1717#include "sc_fwd.h"
    1818#include "internal_ext.h"
     19#include <sys/types.h>
    1920
    2021namespace sc_core {
  • sources/src/sc_main.cc

    r63 r65  
    3535 */
    3636
    37 #include <sstream>
     37#include <cassert>
     38#include <cstring> // strcmp
    3839#include <list>
    3940#ifdef _OPENMP
     
    4142#endif
    4243#include <set>
    43 #include <cstring> // strcmp
    44 #include <cassert>
     44#include <sstream>
    4545
    4646#include "internal.h"
     
    9191
    9292#ifdef _OPENMP
    93 bool use_openmp = true;
     93const bool use_openmp = true;
    9494#else
    95 bool use_openmp = false;
     95const bool use_openmp = false;
    9696#endif
    9797
     
    395395    }
    396396
    397     int ret = sc_main(argc, argv);
    398     //free(pending_write_vector);
    399     close_systemcass();
     397  int ret = sc_main(argc, argv);
     398//  free (pending_write_vector);
     399  close_systemcass ();
    400400
    401401    if (have_to_stop) {
  • sources/src/sc_module.cc

    r63 r65  
    132132    dont_initialize = false;
    133133#ifdef _OPENMP
    134     omp_threadnum = omp_get_thread_num();
     134  omp_threadnum = omp_get_thread_num();
    135135#endif
    136136}
  • sources/src/sc_port.cc

    r63 r65  
    5050#endif
    5151
     52
    5253extern "C" {
    5354    extern char unstable;
     
    228229            << " internal pending writings) ";
    229230#endif
    230         unsigned int i;
    231         for (i = 0; i < *pending_write_vector_nb; ++i) {
     231        unsigned int i;
     232        for (i = 0; i < *pending_write_vector_nb; ++i) {
     233#if 0
     234                cerr << "pending_write[" << i << "] : " << pending_write_vector[i];
     235#endif
    232236#define iter (sc_core::pending_write_vector[i])
    233237#ifdef CONFIG_DEBUG
  • sources/src/sc_signal.h

    r63 r65  
    1616
    1717// Define registers writing method
     18#include <string.h>
    1819#include <iostream>
    1920#include <cstdlib>
     
    6869// Pending write to register (simple stack)
    6970typedef pending_write_t * pending_write_vector_t;
    70 extern "C" int32_t * pending_write_vector_nb;
     71extern "C" int32_t *pending_write_vector_nb;
    7172extern "C" unsigned long long int total_assig;
    7273#ifdef _OPENMP
    73 #pragma omp threadprivate(pending_write_vector_nb, total_assig)
     74#pragma omp threadprivate (pending_write_vector_nb,total_assig)
    7475#endif
    7576extern unsigned int pending_write_vector_capacity;
     77
    7678extern pending_write_vector_t pending_write_vector;
    7779#ifdef _OPENMP
    78 #pragma omp threadprivate(pending_write_vector)
    79 #endif
    80 
    81 template < typename T >
    82 inline void post_write(base_type * const pointer_, const T & value_) /*INLINE*/;
     80#pragma omp threadprivate (pending_write_vector)
     81#endif
     82
     83template <typename T>
     84inline void post_write (base_type *const pointer_,
     85                        const T          value_) /*INLINE*/;
    8386
    8487template < typename T >
     
    9396
    9497template < typename T >
    95 inline void post_write(base_type * const pointer_, const T & value_) {
     98inline void post_write(base_type * const pointer_, const T value_) {
    9699    if (sizeof(T) > sizeof(base_type)) {
    97100        post_multiwrite(pointer_, value_);
     
    105108            exit(-1);
    106109        }
    107 #endif
    108         pending_write_vector[*pending_write_vector_nb].pointer = pointer_;
    109         // pending_write_vector[(*pending_write_vector_nb)++].value = *(reinterpret_cast<const base_type*const>(&value_)); => bug !
    110         pending_write_vector[(*pending_write_vector_nb)++].value = value_; // => bug avec blues !
    111 
    112         // -> fix to use user-defined struct in sc_signal/sc_in/sc_out/sc_inout
    113         // pending_write_vector[(*pending_write_vector_nb)++].value = *((base_type*)&value_); => bug !
     110#endif // CONFIG_DEBUG
     111        sc_core::pending_write_vector[*pending_write_vector_nb].pointer = pointer_;
     112        sc_core::pending_write_vector[(*pending_write_vector_nb)++].value = value_;
     113
    114114#if 0
    115115        std::cerr << "posted write : ptr = " << pointer_ << ", val = " << value_ << "\n";
     
    124124}
    125125
    126 
    127 inline bool is_posted_write() {
    128     return *pending_write_vector_nb > 0;
     126inline bool is_posted_write ()
     127{
     128  return *pending_write_vector_nb > 0;
    129129}
    130130
     
    165165};
    166166
    167 
    168 template < typename T >
    169 class sc_signal : public sc_signal_base {
    170 
    171     private:
    172 
    173     T val;
    174     T new_val;
    175     typedef T data_type;
    176     typedef sc_signal < T > this_type;
    177 
    178 
    179     ///////////
    180     // Internal
    181     public:
    182     void init();
    183     ///////////
    184 
    185     void check_writer();
    186 
    187     public:
    188     // constructors, destructor
    189     sc_signal() {
    190         if (typeid(data_type) == typeid(double) || typeid(data_type) == typeid(float)) {
    191             std::cerr << "Error: SystemCASS does not support sc_signal<T> with T of type " << typeid(data_type).name() << std::endl;
    192             exit(1);
    193         }
    194         init();
    195     }
    196 
    197     explicit sc_signal(const char * name_) : sc_signal_base(name_) {
    198         init();
    199     }
    200 
    201     /*virtual*/ ~sc_signal() {}
    202     /*virtual*/ inline const data_type & read() const INLINE;
    203     /*virtual*/ inline void write(const data_type &) /*INLINE*/;
    204 
    205     inline operator const data_type & () const { return this->read(); }
    206 
    207     inline this_type & operator = (const data_type & a) {
    208         sc_signal< T >::write(a);
    209         return *this;
    210     }
    211 
    212     inline this_type & operator = (const sc_signal < T > & a) {
    213         sc_signal< T >::write(a.read());
    214         return *this;
    215     }
    216 
    217     inline this_type & operator += (const data_type & a) {
    218         sc_signal< T >::write(read() + a);
    219         return *this;
    220     }
    221 
    222     inline this_type & operator += (const sc_signal < T > & a) {
    223         sc_signal< T >::write(read() + a.read());
    224         return *this;
    225     }
    226 
    227     inline void * operator new (size_t size, size_t align) {
    228         void * p;
    229         const size_t nsize = (size + align - 1) & ~(align - 1);
    230         if (nsize < size) {
    231             std::cerr << "sc_signal new() alignement doesn't work (" <<
    232                 nsize << " < " << size << ")" << std::endl;
    233             abort();
    234         }
    235 
    236         if (posix_memalign(&p, align, nsize) == 0) {
    237             return p;
    238         }
    239         else {
    240             return NULL;
    241         }
    242     }
    243 
    244     inline void * operator new (size_t size) {
    245         return malloc(size);
    246     }
    247 
    248     inline void * operator new (size_t size, void * p) {
    249         return p;
    250     }
    251 
    252     const data_type & get_new_value() const {
    253         // Warning: untested and doesn't support variable size
    254         unsigned int i = 0;
    255         for (i = 0; i < pending_write_vector_capacity; i++) {
    256             if (pending_write_vector[i].pointer == get_pointer()) {
    257                 return pending_write_vector[i].value;
    258             }
    259         }
    260         return val;
    261     }
    262 
    263     //  void trace (sc_trace_file * tf) const;
    264     /*
    265     virtual void print(std::ostream & o) const { o << *this; }
    266     virtual void dump(std::ostream & o) const { o << *this; }
    267     */
    268 
    269     private:
    270     // disabled
    271     sc_signal(const sc_signal < T > &);
     167template <typename T>
     168class sc_signal : public sc_signal_base
     169{
     170private:
     171  T val;
     172  T new_val;
     173  typedef T                data_type;
     174  typedef sc_signal < T >  this_type;
     175
     176  ///////////
     177  // Internal
     178public: void init ();
     179  ///////////
     180
     181  //  virtual void update ();
     182  void check_writer ();
     183public:
     184  // constructors, destructor
     185  sc_signal ()
     186  { init (); }
     187  explicit sc_signal (const char *name_): sc_signal_base(name_)
     188  { init (); }
     189  /*virtual */~ sc_signal ()
     190  {}
     191  // methods
     192  /*
     193  virtual void register_port (sc_port_base &, const char *)
     194  {}
     195  virtual const sc_event & default_event () const
     196  {}
     197  virtual const sc_event & value_changed_event () const
     198  {}
     199  */
     200  /*virtual*/ inline const data_type & read () const INLINE;
     201/*
     202  virtual const T & get_data_ref () const
     203  {}
     204  virtual bool event () const
     205  {}
     206  */
     207  /*virtual*/ inline void write (const data_type &) /*INLINE*/;
     208  inline operator const data_type & () const
     209  { return this->read(); }
     210  inline this_type& operator = (const data_type & a)
     211  { sc_signal<T>::write (a); return *this; }
     212  inline this_type& operator = (const sc_signal < T > &a)
     213  { sc_signal<T>::write (a.read()); return *this; }
     214  inline this_type& operator += (const data_type & a)
     215  { sc_signal<T>::write (read() + a); return *this; }
     216  inline this_type& operator += (const sc_signal < T > &a)
     217  { sc_signal<T>::write (read()+a.read()); return *this; }
     218  inline void * operator new (size_t size, size_t align)
     219  {
     220    void *p;
     221    const size_t nsize = (size + align - 1) & ~(align -1);
     222    if (nsize < size) {
     223        std::cerr << "sc_signal new() alignement doesn't work (" <<
     224            nsize << " < " << size << ")" << std::endl;
     225        abort();
     226    }
     227
     228    if (posix_memalign(&p, align, nsize) == 0)
     229        return p;
     230    else
     231        return NULL;
     232  }
     233  inline void * operator new (size_t size)
     234  {
     235    return malloc(size);
     236  }
     237  inline void * operator new (size_t size, void *p)
     238  {
     239    return p;
     240  }
     241
     242  const data_type & get_new_value () const;
     243//  void trace (sc_trace_file * tf) const;
     244  /*
     245        virtual void print (std::ostream &o) const
     246  { o << *this; }
     247  virtual void dump (std::ostream &o) const
     248  { o << *this; }
     249        */
     250private:
     251  // disabled
     252  sc_signal (const sc_signal < T > &);
    272253
    273254};
    274255
    275 
    276 template < typename T >
    277 void sc_signal< T >::init() {
    278     set_pointer((tab_t *) (void *) &val);
    279     set_kind(kind_string);
    280     sc_interface::init(sizeof(data_type));
     256template <typename T>
     257void
     258sc_signal<T>::init()
     259{
     260        set_pointer ((tab_t*)(void*)&val);
     261  set_kind    (kind_string);
     262  sc_interface::init (sizeof (data_type));
    281263#if 0
    282     val = (T) 0; /* The simulator initializes the signal/register to 0.    */
    283                  /* However, hardware initialization still has to be done. */
    284                  /* This kind of initialization is for trace diffing.      */
     264  val = 0; /* The simulator initializes the signal/register to 0.    */
     265           /* However, hardware initialization still has to be done. */
     266           /* This kind of initialization is for trace diffing.      */
    285267#else
    286     memset(&val, 0, sizeof(val));
    287     memset(&new_val, 0, sizeof(new_val));
    288 #endif
    289 }
    290 
     268   memset(&val, 0, sizeof(val));
     269   memset(&new_val, 0, sizeof(new_val));
     270#endif
     271}
    291272
    292273// read the value
     
    307288        return;
    308289    }
     290  if (sc_signal<T>::val == value_ && sc_signal<T>::new_val == value_)
     291    return;
    309292#ifdef CONFIG_DEBUG
    310293    if (get_pointer() == NULL) {
     
    319302    std::cerr << "write (posted) " << value_ << " on sc_signal (writing into register) '" << name() << "'\n";
    320303#endif
    321     sc_signal<T>::new_val = value_;
    322     post_write(/*(tab_t*)&val*/ get_pointer(), value_);
     304  sc_signal<T>::new_val = value_;
     305  post_write (/*(tab_t*)&val*/ get_pointer(), value_);
    323306}
    324307
  • sources/src/schedulers.cc

    r63 r65  
    6161namespace sc_core {
    6262
     63//
    6364// sort_functions splits and sorts instances_list into three functions lists :
    64 method_process_list_t * transition_func_list;
    65 method_process_list_t * moore_func_list;
    66 #ifdef _OPENMP
    67 #pragma omp threadprivate(transition_func_list, moore_func_list)
     65method_process_list_t *transition_func_list;
     66method_process_list_t *moore_func_list;
     67#ifdef _OPENMP
     68#pragma omp threadprivate (transition_func_list, moore_func_list)
    6869#endif
    6970method_process_list_t combinational_func_list;
     71
    7072/* ***************************** */
    7173/* Dumping functions (for debug) */
     
    8486/*  functions   */
    8587/****************/
    86 
    87 
    8888
    8989static bool sort_by_module_ptr (const method_process_t * a1, const method_process_t * a2) {
     
    133133            if ((*m)->omp_threadnum == omp_get_thread_num())
    134134#endif
    135             {
    136                 if ((*m)->is_transition()) {
    137                     transition_func_list->push_back(*m);
    138                 }
    139                 else if ((*m)->is_genmoore()) {
    140                     moore_func_list->push_back(*m);
    141                 }
    142             }
    143         }
    144         // Sort transition functions by method pointer (1) and by module pointer (2)
    145         std::sort(transition_func_list->begin(), transition_func_list->end(), sort_by_fct_ptr);
    146         // Sort generation functions by method pointer (1) and by module pointer (2)
    147         std::sort(moore_func_list->begin(), moore_func_list->end(), sort_by_fct_ptr);
    148     }
    149 
    150     for (m = method_process_list.begin(); m != method_process_list.end(); ++m) {
    151         if ((*m)->is_combinational()) {
    152             combinational_func_list.push_back(*m);
    153         }
    154     }
     135            {
     136                if ((*m)->is_transition ())
     137                    transition_func_list->push_back(*m);
     138                else if ((*m)->is_genmoore ())
     139                    moore_func_list->push_back(*m);
     140            }
     141        }
     142#if 1
     143        // Sort transition functions by method pointer (1) and by module pointer (2)
     144        std::sort (transition_func_list->begin(), transition_func_list->end(), sort_by_fct_ptr);
     145        // Sort generation functions by method pointer (1) and by module pointer (2)
     146        std::sort (moore_func_list->begin(), moore_func_list->end(), sort_by_fct_ptr);
     147#endif
     148#if 0
     149        std::sort (transition_func_list->begin(), transition_func_list->end(), only_sort_by_module_ptr);
     150        std::sort (moore_func_list->begin(), moore_func_list->end(), only_sort_by_module_ptr);
     151#endif
     152  }
     153
     154  for (m = method_process_list.begin(); m != method_process_list.end(); ++m) {
     155    if ((*m)->is_combinational()) combinational_func_list.push_back(*m);
     156  }
    155157}
    156158
     
    225227}
    226228
    227 
    228 string get_scheduling(int scheduling_method) {
    229     string base_name;
    230     /* marque les fonctions comme fonction de mealy ou non */
    231     if (dump_funclist_info) {
    232         cerr << "method process list : " << method_process_list << "\n";
    233     }
    234 
    235     sort_functions();
     229string
     230get_scheduling (int scheduling_method)
     231{
     232  string base_name;
     233  /* marque les fonctions comme fonction de mealy ou non */
     234  if (dump_funclist_info)
     235    cerr << "method process list : " << method_process_list << "\n";
     236  sort_functions ();
    236237#ifdef _OPENMP
    237238#pragma omp parallel
     
    243244            cerr << "Thread " << omp_get_thread_num() << "\n";
    244245#endif
    245             cerr << "  Transition functions : " << *transition_func_list << "\n";
    246             cerr << "  Moore generation functions : " << *moore_func_list << "\n";
     246    cerr << "  Transition functions : " << *transition_func_list << "\n";
     247    cerr << "  Moore generation functions : " << *moore_func_list << "\n";
    247248#ifdef _OPENMP
    248249#pragma omp master
    249250#endif
    250             {
    251                 if (!combinational_func_list.empty()) {
    252                     cerr << "Mealy generation functions : " << combinational_func_list << "\n";
    253                 }
    254             }
    255         }
    256 
    257     /* Schedule */
    258         switch (scheduling_method) {
    259             case BUCHMANN_SCHEDULING :
    260             {
    261                 // Generate the scheduled code, compile and link.
    262                 // Buchmann's thesis explains this scheduling method.
    263                 // Uses port dependancies like Dr. Mouchard.
    264                 ProcessDependencyList * process_list = BuchmannScheduling();
    265                 if (dynamic_link_of_scheduling_code) {
    266                     base_name = gen_scheduling_code_for_dynamic_link(*transition_func_list, *moore_func_list, *process_list);
    267                 }
    268                 else {
    269                     gen_scheduling_code_for_static_func(*transition_func_list, *moore_func_list, *process_list);
    270                 }
    271                 break;
    272             }
    273 
    274             case MOUCHARD_SCHEDULING :
    275             {
    276                 // Generate the scheduled code, compile and link.
    277                 // Mouchard's thesis explains this scheduling method.
    278                 // Uses port dependancies like Dr. Mouchard.
    279                 // CAUTION : unlike FastSysC, this scheduling is totally static
    280                 // and does not use an event-driven scheduler.
    281                 ProcessDependencyList * process_list = MouchardScheduling();
    282                 if (dynamic_link_of_scheduling_code) {
    283                     base_name = gen_scheduling_code_for_dynamic_link(*transition_func_list, *moore_func_list, *process_list);
    284                 }
    285                 else {
    286                     gen_scheduling_code_for_static_func (*transition_func_list, *moore_func_list, *process_list);
    287                 }
    288                 break;
    289             }
    290 
    291             case CASS_SCHEDULING :
    292             {
    293                 // Generate the scheduled code, compile and link
    294                 // Hommais's thesis explains this scheduling method (like CASS strategy)
    295                 // Doesn't use port dependancies
    296                 strong_component_list_t * strong_list = NULL;
     251    {
     252      if (!combinational_func_list.empty()) {
     253            cerr << "Mealy generation functions : " <<
     254                combinational_func_list << "\n";
     255      }
     256    }
     257  }
     258
     259  /* Schedule */
     260  switch (scheduling_method) {
     261  case BUCHMANN_SCHEDULING : {
     262    // Generate the scheduled code, compile and link.
     263    // Buchmann's thesis explains this scheduling method.
     264    // Uses port dependancies like Dr. Mouchard.
     265    ProcessDependencyList* process_list = BuchmannScheduling ();
     266  if (dynamic_link_of_scheduling_code)
     267    base_name = gen_scheduling_code_for_dynamic_link (*transition_func_list, *moore_func_list,*process_list);
     268  else
     269    gen_scheduling_code_for_static_func (*transition_func_list, *moore_func_list, *process_list);
     270    break;
     271  }
     272  case MOUCHARD_SCHEDULING : {
     273    // Generate the scheduled code, compile and link.
     274    // Mouchard's thesis explains this scheduling method.
     275    // Uses port dependancies like Dr. Mouchard.
     276    // CAUTION : unlike FastSysC, this scheduling is totally static
     277    // and does not use an event-driven scheduler.
     278    ProcessDependencyList* process_list = MouchardScheduling ();
     279  if (dynamic_link_of_scheduling_code)
     280    base_name = gen_scheduling_code_for_dynamic_link(*transition_func_list, *moore_func_list,*process_list);
     281  else
     282    gen_scheduling_code_for_static_func (*transition_func_list, *moore_func_list, *process_list);
     283    break;
     284  }
     285  case CASS_SCHEDULING : {
     286    // Generate the scheduled code, compile and link
     287    // Hommais's thesis explains this scheduling method (like CASS strategy)
     288    // Doesn't use port dependancies
     289    strong_component_list_t *strong_list = NULL;
    297290#ifdef _OPENMP
    298291#pragma omp master
    299292#endif
    300                 {
    301                     Graph * g = makegraph (&combinational_func_list);
    302                     if (dump_all_graph && g) {
    303                         graph2dot("module_graph", *g);
    304                     }
    305                     strong_list = strong_component(g);
    306                 }
    307                 if (dynamic_link_of_scheduling_code) {
    308                     base_name = gen_scheduling_code_for_dynamic_link(*transition_func_list, *moore_func_list, strong_list);
    309                 }
    310                 else {
    311                     gen_scheduling_code_for_quasistatic_func (*transition_func_list, *moore_func_list, strong_list);
    312                 }
    313                 // free the void_lists in strong_list
    314                 //for ( strong_component_list_t::iterator i = strong_list->begin(); i < strong_list->end(); i++) {
    315                 //    delete *i;
    316                 //}
     293    {
     294    Graph *g = makegraph (&combinational_func_list);
     295    if (dump_all_graph && g)
     296      graph2dot("module_graph", *g);
     297    strong_list = strong_component (g);
     298    }
     299  if (dynamic_link_of_scheduling_code)
     300    base_name = gen_scheduling_code_for_dynamic_link(*transition_func_list, *moore_func_list,strong_list);
     301  else
     302    gen_scheduling_code_for_quasistatic_func (*transition_func_list, *moore_func_list, strong_list);
    317303#ifdef _OPENMP
    318304#pragma omp master
    319305#endif
    320                 {
    321                     delete strong_list;
    322                 }
    323                 break;
    324             }
    325             default :
    326                 cerr << "Error : Unable to schedule SystemC process."
    327                     "Please select a scheduling method.\n";
    328                 exit (35);
    329         }
    330     }
    331     return base_name;
     306  {
     307    delete strong_list;
     308  }
     309  break;
     310  }
     311  default :
     312    cerr << "Error : Unable to schedule SystemC process."
     313            "Please select a scheduling method.\n";
     314    exit (35);
     315  }
     316  }
     317  return base_name;
    332318}
    333319
Note: See TracChangeset for help on using the changeset viewer.