/* -*- c++ -*-
 * File     : vci_mem_cache.cpp
 * Date     : 30/10/2008
 * Copyright: UPMC / LIP6
 * Authors  : Alain Greiner / Eric Guthmuller
 *
 * SOCLIB_LGPL_HEADER_BEGIN
 *
 * This file is part of SoCLib, GNU LGPLv2.1.
 *
 * SoCLib is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; version 2.1 of the License.
 *
 * SoCLib is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with SoCLib; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * SOCLIB_LGPL_HEADER_END
 *
 * Maintainers: alain eric.guthmuller@polytechnique.edu
 */

#include "../include/vci_mem_cache.h"

// Debug trace switch.
// NOTE(review): the guard below tests #ifdef, and the macro is *defined*
// (as 0), so the name tables are always compiled in. Use
// `#if DEBUG_VCI_MEM_CACHE` if they are meant to be stripped — left
// unchanged here to preserve current build behavior.
#define DEBUG_VCI_MEM_CACHE 0

namespace soclib { namespace caba {

#ifdef DEBUG_VCI_MEM_CACHE
// Human-readable state names used by the debug traces.
// Each table must list its states in exactly the same order as the
// corresponding FSM enum declared in vci_mem_cache.h, otherwise the
// printed names are shifted with respect to the real states.

// TGT_CMD FSM: decodes incoming VCI commands on the target port.
const char *tgt_cmd_fsm_str[] = {
    "TGT_CMD_IDLE",
    "TGT_CMD_READ",
    "TGT_CMD_READ_EOP",
    "TGT_CMD_WRITE",
    "TGT_CMD_ATOMIC",
    "TGT_CMD_CLEANUP",
};

// TGT_RSP FSM: sends responses on the VCI target port.
const char *tgt_rsp_fsm_str[] = {
    "TGT_RSP_READ_IDLE",
    "TGT_RSP_WRITE_IDLE",
    "TGT_RSP_LLSC_IDLE",
    "TGT_RSP_CLEANUP_IDLE",
    "TGT_RSP_XRAM_IDLE",
    "TGT_RSP_INIT_IDLE",
    "TGT_RSP_READ_TEST",
    "TGT_RSP_READ_WORD",
    "TGT_RSP_READ_LINE",
    "TGT_RSP_WRITE",
    "TGT_RSP_LLSC",
    "TGT_RSP_CLEANUP",
    "TGT_RSP_XRAM_TEST",
    "TGT_RSP_XRAM_WORD",
    "TGT_RSP_XRAM_LINE",
    "TGT_RSP_INIT",
};

// INIT_CMD FSM: drives update/invalidate commands to the L1 caches.
const char *init_cmd_fsm_str[] = {
    "INIT_CMD_INVAL_IDLE",
    "INIT_CMD_INVAL_SEL",
    "INIT_CMD_INVAL_NLINE",
    "INIT_CMD_UPDT_IDLE",
    "INIT_CMD_UPDT_SEL",
    "INIT_CMD_UPDT_NLINE",
    "INIT_CMD_UPDT_INDEX",
    "INIT_CMD_UPDT_DATA",
};

// INIT_RSP FSM: collects update/invalidate responses from the L1 caches.
const char *init_rsp_fsm_str[] = {
    "INIT_RSP_IDLE",
    "INIT_RSP_UPT_LOCK",
    "INIT_RSP_UPT_CLEAR",
    "INIT_RSP_END",
};

// READ FSM: services processor read requests.
const char *read_fsm_str[] = {
    "READ_IDLE",
    "READ_DIR_LOCK",
    "READ_DIR_HIT",
    "READ_RSP",
    "READ_TRT_LOCK",
    "READ_TRT_SET",
    "READ_XRAM_REQ",
};

// WRITE FSM: services processor write bursts.
const char *write_fsm_str[] = {
    "WRITE_IDLE",
    "WRITE_NEXT",        // FIX: a missing comma here made the two adjacent
                         // literals concatenate ("WRITE_NEXTWRITE_DIR_LOCK"),
                         // shifting every following label by one entry.
    "WRITE_DIR_LOCK",
    "WRITE_DIR_HIT_READ",
    "WRITE_DIR_HIT",
    "WRITE_UPT_LOCK",
    "WRITE_WAIT_UPT",
    "WRITE_UPDATE",
    "WRITE_RSP",
    "WRITE_TRT_LOCK",
    "WRITE_TRT_DATA",
    "WRITE_TRT_SET",
    "WRITE_WAIT_TRT",
    "WRITE_XRAM_REQ",
};

// IXR_RSP FSM: handles responses from the external RAM network.
const char *ixr_rsp_fsm_str[] = {
    "IXR_RSP_IDLE",
    "IXR_RSP_ACK",
    "IXR_RSP_TRT_ERASE",
    "IXR_RSP_TRT_READ",
};

// XRAM_RSP FSM: installs lines fetched from the XRAM and evicts victims.
const char *xram_rsp_fsm_str[] = {
    "XRAM_RSP_IDLE",
    "XRAM_RSP_TRT_COPY",
    "XRAM_RSP_TRT_DIRTY",
    "XRAM_RSP_DIR_LOCK",
    "XRAM_RSP_DIR_UPDT",
    "XRAM_RSP_DIR_RSP",
    "XRAM_RSP_UPT_LOCK",
    "XRAM_RSP_WAIT",
    "XRAM_RSP_INVAL",
    "XRAM_RSP_WRITE_DIRTY",
};

// XRAM_CMD FSM: issues read/write commands to the external RAM.
const char *xram_cmd_fsm_str[] = {
    "XRAM_CMD_READ_IDLE",
    "XRAM_CMD_WRITE_IDLE",
    "XRAM_CMD_LLSC_IDLE",
    "XRAM_CMD_XRAM_IDLE",
    "XRAM_CMD_READ_NLINE",
    "XRAM_CMD_WRITE_NLINE",
    "XRAM_CMD_LLSC_NLINE",
    "XRAM_CMD_XRAM_DATA",
};

// LLSC FSM: services LL/SC atomic accesses.
const char *llsc_fsm_str[] = {
    "LLSC_IDLE",
    "LL_DIR_LOCK",
    "LL_DIR_HIT",
    "LL_RSP",
    "SC_DIR_LOCK",
    "SC_DIR_HIT",
    "SC_RSP_FALSE",
    "SC_RSP_TRUE",
    "LLSC_TRT_LOCK",
    "LLSC_TRT_SET",
    "LLSC_XRAM_REQ",
};

// CLEANUP FSM: services cleanup requests from the L1 caches.
const char *cleanup_fsm_str[] = {
    "CLEANUP_IDLE",
    "CLEANUP_DIR_LOCK",
    "CLEANUP_DIR_WRITE",
    "CLEANUP_RSP",
};

// ALLOC_DIR / ALLOC_TRT / ALLOC_UPT FSMs: arbiters for the shared
// directory, transaction table and update/inval table.
const char *alloc_dir_fsm_str[] = {
    "ALLOC_DIR_READ",
    "ALLOC_DIR_WRITE",
    "ALLOC_DIR_LLSC",
    "ALLOC_DIR_CLEANUP",
    "ALLOC_DIR_XRAM_RSP",
};

const char *alloc_trt_fsm_str[] = {
    "ALLOC_TRT_READ",
    "ALLOC_TRT_WRITE",
    "ALLOC_TRT_LLSC",
    "ALLOC_TRT_XRAM_RSP",
    "ALLOC_TRT_IXR_RSP",
};

const char *alloc_upt_fsm_str[] = {
    "ALLOC_UPT_WRITE",
    "ALLOC_UPT_XRAM_RSP",
    "ALLOC_UPT_INIT_RSP",
};
#endif

// FIX(review): the template brackets of this macro were lost in the file
// (it read `template x VciMemCache`); restored to the standard SoCLib
// tmpl() idiom so the member definitions below instantiate on vci_param.
#define tmpl(x) template<typename vci_param> x VciMemCache<vci_param>

using soclib::common::uint32_log2;

////////////////////////////////
//      Constructor
////////////////////////////////

tmpl(/**/)::VciMemCache(
    sc_module_name name,
    const soclib::common::MappingTable &mtp,
    const soclib::common::MappingTable &mtc,
    const soclib::common::MappingTable &mtx,
    const soclib::common::IntTab &vci_ixr_index,
    const
soclib::common::IntTab &vci_ini_index, const soclib::common::IntTab &vci_tgt_index, size_t nways, size_t nsets, size_t nwords) : soclib::caba::BaseModule(name), p_clk("clk"), p_resetn("resetn"), p_vci_tgt("vci_tgt"), p_vci_ini("vci_ini"), m_initiators( 32 ), m_ways( nways ), m_sets( nsets ), m_words( nwords ), m_srcid_ixr( mtx.indexForId(vci_ixr_index) ), m_srcid_ini( mtc.indexForId(vci_ini_index) ), //m_mem_segment("bidon",0,0,soclib::common::IntTab(),false), m_seglist(mtp.getSegmentList(vci_tgt_index)), m_reg_segment("bidon",0,0,soclib::common::IntTab(),false), m_coherence_table( mtc.getCoherenceTable() ), m_atomic_tab( m_initiators ), m_transaction_tab( TRANSACTION_TAB_LINES, nwords ), m_update_tab( UPDATE_TAB_LINES ), m_cache_directory( nways, nsets, nwords, vci_param::N ), nseg(0), #define L2 soclib::common::uint32_log2 m_x( L2(m_words), 2), m_y( L2(m_sets), L2(m_words) + 2), m_z( vci_param::N - L2(m_sets) - L2(m_words) - 2, L2(m_sets) + L2(m_words) + 2), m_nline( vci_param::N - L2(m_words) - 2, L2(m_words) + 2), #undef L2 // FIFOs m_cmd_read_addr_fifo("m_cmd_read_addr_fifo", 4), m_cmd_read_word_fifo("m_cmd_read_word_fifo", 4), m_cmd_read_srcid_fifo("m_cmd_read_srcid_fifo", 4), m_cmd_read_trdid_fifo("m_cmd_read_trdid_fifo", 4), m_cmd_read_pktid_fifo("m_cmd_read_pktid_fifo", 4), m_cmd_write_addr_fifo("m_cmd_write_addr_fifo",8), m_cmd_write_eop_fifo("m_cmd_write_eop_fifo",8), m_cmd_write_srcid_fifo("m_cmd_write_srcid_fifo",8), m_cmd_write_trdid_fifo("m_cmd_write_trdid_fifo",8), m_cmd_write_pktid_fifo("m_cmd_write_pktid_fifo",8), m_cmd_write_data_fifo("m_cmd_write_data_fifo",8), m_cmd_write_be_fifo("m_cmd_write_be_fifo",8), m_cmd_llsc_addr_fifo("m_cmd_llsc_addr_fifo",4), m_cmd_llsc_sc_fifo("m_cmd_llsc_sc_fifo",4), m_cmd_llsc_srcid_fifo("m_cmd_llsc_srcid_fifo",4), m_cmd_llsc_trdid_fifo("m_cmd_llsc_trdid_fifo",4), m_cmd_llsc_pktid_fifo("m_cmd_llsc_pktid_fifo",4), m_cmd_llsc_wdata_fifo("m_cmd_llsc_wdata_fifo",4), 
m_cmd_cleanup_srcid_fifo("m_cmd_cleanup_srcid_fifo",4), m_cmd_cleanup_trdid_fifo("m_cmd_cleanup_trdid_fifo",4), m_cmd_cleanup_pktid_fifo("m_cmd_cleanup_pktid_fifo",4), m_cmd_cleanup_nline_fifo("m_cmd_cleanup_nline_fifo",4), r_tgt_cmd_fsm("r_tgt_cmd_fsm"), r_read_fsm("r_read_fsm"), r_write_fsm("r_write_fsm"), r_init_rsp_fsm("r_init_rsp_fsm"), r_cleanup_fsm("r_cleanup_fsm"), r_llsc_fsm("r_llsc_fsm"), r_ixr_rsp_fsm("r_ixr_rsp_fsm"), r_xram_rsp_fsm("r_xram_rsp_fsm"), r_xram_cmd_fsm("r_xram_cmd_fsm"), r_tgt_rsp_fsm("r_tgt_rsp_fsm"), r_init_cmd_fsm("r_init_cmd_fsm"), r_alloc_dir_fsm("r_alloc_dir_fsm"), r_alloc_trt_fsm("r_alloc_trt_fsm"), r_alloc_upt_fsm("r_alloc_upt_fsm") { assert(IS_POW_OF_2(nsets)); assert(IS_POW_OF_2(nwords)); assert(IS_POW_OF_2(nways)); assert(nsets); assert(nwords); assert(nways); assert(nsets <= 1024); assert(nwords <= 32); assert(nways <= 32); // Get the segments associated to the MemCache //std::list segList(mtp.getSegmentList(vci_tgt_index)); std::list::iterator seg; /* for(seg = segList.begin(); seg != segList.end() ; seg++) { if( seg->size() > 8 ) m_mem_segment = *seg; else m_reg_segment = *seg; nseg++; } */ for(seg = m_seglist.begin(); seg != m_seglist.end() ; seg++) { if( seg->size() > 8 ) nseg++; } //assert( (nseg == 2) && (m_reg_segment.size() == 8) ); m_seg = new soclib::common::Segment*[nseg]; size_t i = 0; for ( seg = m_seglist.begin() ; seg != m_seglist.end() ; seg++ ) { if ( seg->size() > 8 ) { m_seg[i] = &(*seg); i++; } else { m_reg_segment = *seg; } } assert( (m_reg_segment.size() == 8) ); // Memory cache allocation & initialisation m_cache_data = new data_t**[nways]; for ( size_t i=0 ; i[TRANSACTION_TAB_LINES]; // Allocation for XRAM_RSP FSM r_xram_rsp_victim_data = new sc_signal[nwords]; r_xram_rsp_to_tgt_rsp_data = new sc_signal[nwords]; r_xram_rsp_to_tgt_rsp_val = new sc_signal[nwords]; r_xram_rsp_to_xram_cmd_data = new sc_signal[nwords]; // Allocation for READ FSM r_read_data = new sc_signal[nwords]; r_read_to_tgt_rsp_data = 
new sc_signal[nwords]; r_read_to_tgt_rsp_val = new sc_signal[nwords]; // Allocation for WRITE FSM r_write_data = new sc_signal[nwords]; r_write_be = new sc_signal[nwords]; r_write_to_init_cmd_data = new sc_signal[nwords]; r_write_to_init_cmd_we = new sc_signal[nwords]; // Simulation SC_METHOD(transition); dont_initialize(); sensitive << p_clk.pos(); SC_METHOD(genMoore); dont_initialize(); sensitive << p_clk.neg(); } // end constructor ///////////////////////////////////////// // This function prints the statistics ///////////////////////////////////////// tmpl(void)::print_stats() { std::cout << "----------------------------------" << std::dec << std::endl; std::cout << "MEM_CACHE " << m_srcid_ini << " / Time = " << m_cpt_cycles << std::endl << "- READ RATE = " << (float)m_cpt_read/m_cpt_cycles << std::endl << "- READ MISS RATE = " << (float)m_cpt_read_miss/m_cpt_read << std::endl << "- WRITE RATE = " << (float)m_cpt_write/m_cpt_cycles << std::endl << "- WRITE MISS RATE = " << (float)m_cpt_write_miss/m_cpt_write << std::endl << "- WRITE BURST LENGTH = " << (float)m_cpt_write_cells/m_cpt_write << std::endl << "- UPDATE RATE = " << (float)m_cpt_update/m_cpt_cycles << std::endl << "- UPDATE ARITY = " << (float)m_cpt_update_mult/m_cpt_update << std::endl << "- INVAL RATE = " << (float)m_cpt_inval/m_cpt_cycles << std::endl << "- INVAL ARITY = " << (float)m_cpt_inval_mult/m_cpt_inval << std::endl << "- SAVE DIRTY RATE = " << (float)m_cpt_write_dirty/m_cpt_cycles << std::endl << "- CLEANUP RATE = " << (float)m_cpt_cleanup/m_cpt_cycles << std::endl << "- LL RATE = " << (float)m_cpt_ll/m_cpt_cycles << std::endl << "- SC RATE = " << (float)m_cpt_sc/m_cpt_cycles << std::endl; } ///////////////////////////////// tmpl(/**/)::~VciMemCache() ///////////////////////////////// { for(size_t i=0; icontains(p_vci_tgt.address.read()) ) { reached = true; r_index = index; } } if ( !reached ) { std::cout << "Out of segment access in VCI_MEM_CACHE" << std::endl; std::cout << "Faulty 
address = " << p_vci_tgt.address.read() << std::endl; std::cout << "Faulty initiator = " << p_vci_tgt.srcid.read() << std::endl; exit(0); } else if ( p_vci_tgt.cmd.read() == vci_param::CMD_READ ) { r_tgt_cmd_fsm = TGT_CMD_READ; } else if (( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) && ( p_vci_tgt.trdid.read() == 0x0 ) ) { r_tgt_cmd_fsm = TGT_CMD_WRITE; } else if ((p_vci_tgt.cmd.read() == vci_param::CMD_LOCKED_READ) || (p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND) ) { r_tgt_cmd_fsm = TGT_CMD_ATOMIC; } else if (( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) && ( p_vci_tgt.trdid.read() == 0x1 )) { r_tgt_cmd_fsm = TGT_CMD_CLEANUP; } } break; } ////////////////// case TGT_CMD_READ: { assert(((p_vci_tgt.plen.read() == 4) || (p_vci_tgt.plen.read() == m_words*4)) && "All read request to the MemCache must have PLEN = 4 or PLEN = 4*nwords"); if ( p_vci_tgt.cmdval && m_cmd_read_addr_fifo.wok() ) { cmd_read_fifo_put = true; if ( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; else r_tgt_cmd_fsm = TGT_CMD_READ_EOP; } break; } ////////////////////// case TGT_CMD_READ_EOP: { if ( p_vci_tgt.cmdval && p_vci_tgt.eop ){ r_tgt_cmd_fsm = TGT_CMD_IDLE; } break; } /////////////////// case TGT_CMD_WRITE: { if ( p_vci_tgt.cmdval && m_cmd_write_addr_fifo.wok() ) { cmd_write_fifo_put = true; if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; } break; } //////////////////// case TGT_CMD_ATOMIC: { assert(p_vci_tgt.eop && "Memory Cache Error: LL or SC command with length > 1 "); if ( p_vci_tgt.cmdval && m_cmd_llsc_addr_fifo.wok() ) { cmd_llsc_fifo_put = true; r_tgt_cmd_fsm = TGT_CMD_IDLE; } break; } ///////////////////// case TGT_CMD_CLEANUP: { assert(p_vci_tgt.eop && "Memory Cache Error: CLEANUP request with length > 1 "); if ( p_vci_tgt.cmdval && m_cmd_cleanup_nline_fifo.wok() ) { cmd_cleanup_fifo_put = true; r_tgt_cmd_fsm = TGT_CMD_IDLE; } break; } } // end switch tgt_cmd_fsm ///////////////////////////////////////////////////////////////////////// // INIT_RSP FSM 
///////////////////////////////////////////////////////////////////////// // This FSM controls the response to the update or invalidate requests // sent by the memory cache to the L1 caches : // // - update request initiated by the WRITE FSM. // The FSM decrements the proper entry in the Update/Inval Table. // It sends a request to the TGT_RSP FSM to complete the pending // write transaction (acknowledge response to the writer processor), // and clear the UPT entry when all responses have been received. // - invalidate request initiated by the XRAM_RSP FSM. // The FSM decrements the proper entry in the Update/Inval_Table, // and clear the entry when all responses have been received. // // All those response packets are one word, compact // packets complying with the VCI advanced format. // The index in the Table is defined in the RTRDID field, and // the Transaction type is defined in the Update/Inval Table. ///////////////////////////////////////////////////////////////////// switch ( r_init_rsp_fsm.read() ) { /////////////////// case INIT_RSP_IDLE: { if ( p_vci_ini.rspval ) { assert ( ( p_vci_ini.rtrdid.read() < m_update_tab.size() ) && "UPT index too large in VCI response paquet received by memory cache" ); assert ( p_vci_ini.reop && "All response packets to update/invalidate requests must be one cell" ); r_init_rsp_upt_index = p_vci_ini.rtrdid.read(); r_init_rsp_fsm = INIT_RSP_UPT_LOCK; } break; } /////////////////////// case INIT_RSP_UPT_LOCK: // decrement the number of expected responses { if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) { size_t count = 0; bool valid = m_update_tab.decrement(r_init_rsp_upt_index.read(), count); assert ( valid && "Invalid UPT entry in VCI response paquet received by memory cache" ); if ( count == 0 ) r_init_rsp_fsm = INIT_RSP_UPT_CLEAR; else r_init_rsp_fsm = INIT_RSP_IDLE; } break; } //////////////////////// case INIT_RSP_UPT_CLEAR: // clear the UPT entry { if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) { 
r_init_rsp_srcid = m_update_tab.srcid(r_init_rsp_upt_index.read()); r_init_rsp_trdid = m_update_tab.trdid(r_init_rsp_upt_index.read()); r_init_rsp_pktid = m_update_tab.pktid(r_init_rsp_upt_index.read()); bool update = m_update_tab.is_update(r_init_rsp_upt_index.read()); if ( update ) r_init_rsp_fsm = INIT_RSP_END; else r_init_rsp_fsm = INIT_RSP_IDLE; m_update_tab.clear(r_init_rsp_upt_index.read()); } break; } ////////////////// case INIT_RSP_END: { if ( !r_init_rsp_to_tgt_rsp_req ) { r_init_rsp_to_tgt_rsp_req = true; r_init_rsp_to_tgt_rsp_srcid = r_init_rsp_srcid.read(); r_init_rsp_to_tgt_rsp_trdid = r_init_rsp_trdid.read(); r_init_rsp_to_tgt_rsp_pktid = r_init_rsp_pktid.read(); r_init_rsp_fsm = INIT_RSP_IDLE; } break; } } // end switch r_init_rsp_fsm //////////////////////////////////////////////////////////////////////////////////// // READ FSM //////////////////////////////////////////////////////////////////////////////////// // The READ FSM controls the read requests sent by processors. // It takes the lock protecting the cache directory to check the cache line status: // - In case of HIT, the fsm copies the data (one line, or one single word) // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. // The requesting initiator is registered in the cache directory. // - In case of MISS, the READ fsm takes the lock protecting the transaction tab. // If a read transaction to the XRAM for this line already exists, // or if the transaction tab is full, the fsm is stalled. // If a transaction entry is free, the READ fsm sends a request to the XRAM. 
//////////////////////////////////////////////////////////////////////////////////// switch ( r_read_fsm.read() ) { /////////////// case READ_IDLE: { if (m_cmd_read_addr_fifo.rok()) { m_cpt_read++; r_read_fsm = READ_DIR_LOCK; } break; } /////////////////// case READ_DIR_LOCK: // check directory for hit / miss { if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { size_t way = 0; DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); r_read_dirty = entry.dirty; r_read_tag = entry.tag; r_read_lock = entry.lock; r_read_way = way; r_read_word = m_cmd_read_word_fifo.read(); r_read_copies = entry.copies | (0x1 << m_cmd_read_srcid_fifo.read()); // In case of hit, the read acces must be registered in the copies bit-vector // for both a cache line read & a single word read (TLB coherence) if( entry.valid ) { r_read_fsm = READ_DIR_HIT; } else { r_read_fsm = READ_TRT_LOCK; m_cpt_read_miss++; } } break; } ////////////////// case READ_DIR_HIT: // read hit : update the memory cache { if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { // read data in the cache size_t set = m_y[m_cmd_read_addr_fifo.read()]; size_t way = r_read_way.read(); for ( size_t i=0 ; i(m_words,0), std::vector(m_words,0)); r_read_fsm = READ_XRAM_REQ; } break; } ///////////////////// case READ_XRAM_REQ: { if( !r_read_to_xram_cmd_req ) { cmd_read_fifo_get = true; r_read_to_xram_cmd_req = true; r_read_to_xram_cmd_nline = m_nline[m_cmd_read_addr_fifo.read()]; r_read_to_xram_cmd_trdid = r_read_trt_index.read(); r_read_fsm = READ_IDLE; } break; } } // end switch read_fsm /////////////////////////////////////////////////////////////////////////////////// // WRITE FSM /////////////////////////////////////////////////////////////////////////////////// // The WRITE FSM handles the write bursts sent by the processors. // All addresses in a burst must be in the same cache line. // A complete write burst is consumed in the FIFO & copied to a local buffer. 
// Then the FSM takes the lock protecting the cache directory, to check // if the line is in the cache. // // - In case of HIT, the cache is updated. // If there is no other copy, an acknowledge response is immediately // returned to the writing processor. // if the data is cached by other processoris, the FSM takes the lock // protecting the Update Table (UPT) to register this update transaction. // If the UPT is full, it releases the lock and waits. Then, it sends // a multi-update request to all owners of the line (but the writer), // through the INIT_CMD FSM. In case of multi-update transaction, the WRITE FSM // does not respond to the writing processor, as this response will be sent by // the INIT_RSP FSM when all update responses have been received. // // - In case of MISS, the WRITE FSM takes the lock protecting the transaction // table (TRT). If a read transaction to the XRAM for this line already exists, // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, // the WRITE FSM register a new transaction in TRT, and sends a read line request // to the XRAM. If the TRT is full, it releases the lock, and waits. // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 
///////////////////////////////////////////////////////////////////////////////////// switch ( r_write_fsm.read() ) { //////////////// case WRITE_IDLE: // copy first word of a write burst in local buffer { if ( m_cmd_write_addr_fifo.rok()) { m_cpt_write++; m_cpt_write_cells++; // consume a word in the FIFO & write it in the local buffer cmd_write_fifo_get = true; size_t index = m_x[m_cmd_write_addr_fifo.read()]; r_write_address = m_cmd_write_addr_fifo.read(); r_write_word_index = index; r_write_word_count = 1; r_write_data[index] = m_cmd_write_data_fifo.read(); r_write_srcid = m_cmd_write_srcid_fifo.read(); r_write_trdid = m_cmd_write_trdid_fifo.read(); r_write_pktid = m_cmd_write_pktid_fifo.read(); // the be field must be set for all words for ( size_t i=0 ; i be_vector; std::vector data_vector; be_vector.clear(); data_vector.clear(); for ( size_t i=0; i be_vector; std::vector data_vector; be_vector.clear(); data_vector.clear(); for ( size_t i=0; i(m_words,0), std::vector(m_words,0) ); if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_UPT_LOCK; else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; } break; } ////////////////////// case XRAM_RSP_DIR_RSP: // send a request to TGT_RSP FSM in case of read { if ( !r_xram_rsp_to_tgt_rsp_req ) { r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; for (size_t i=0; i < m_words; i++) { r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; if( r_xram_rsp_trt_buf.single_word ) { r_xram_rsp_to_tgt_rsp_val[i] = (r_xram_rsp_trt_buf.word_index == i); } else { r_xram_rsp_to_tgt_rsp_val[i] = true; } } r_xram_rsp_to_tgt_rsp_req = true; if ( r_xram_rsp_victim_inval ) r_xram_rsp_fsm = XRAM_RSP_UPT_LOCK; else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; else r_xram_rsp_fsm = XRAM_RSP_IDLE; } break; } 
/////////////////////// case XRAM_RSP_UPT_LOCK: // Try to register the inval transaction in UPT { if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP ) { size_t index; copy_t copies = r_xram_rsp_victim_copies.read(); copy_t mask = 0x1; size_t n=0; for ( size_t i=0 ; i<32 ; i++) { if ( copies & mask ) n++; mask = (mask << 1); } bool wok = m_update_tab.set(false, // it's an inval transaction 0, 0, 0, n, index); if ( wok ) { r_xram_rsp_upt_index = index; r_xram_rsp_fsm = XRAM_RSP_INVAL; } else { r_xram_rsp_fsm = XRAM_RSP_WAIT; } } break; } /////////////////// case XRAM_RSP_WAIT: // releases UPT lock for one cycle { r_xram_rsp_fsm = XRAM_RSP_UPT_LOCK; } //////////////////// case XRAM_RSP_INVAL: // send invalidate request to INIT_CMD FSM { if( !r_xram_rsp_to_init_cmd_req ) { r_xram_rsp_to_init_cmd_req = true; r_xram_rsp_to_init_cmd_nline = r_xram_rsp_victim_nline.read(); r_xram_rsp_to_init_cmd_trdid = r_xram_rsp_upt_index; r_xram_rsp_to_init_cmd_copies = r_xram_rsp_victim_copies; if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; else r_xram_rsp_fsm = XRAM_RSP_IDLE; } break; } ////////////////////////// case XRAM_RSP_WRITE_DIRTY: // send a write request to XRAM_CMD FSM { if ( !r_xram_rsp_to_xram_cmd_req ) { r_xram_rsp_to_xram_cmd_req = true; r_xram_rsp_to_xram_cmd_nline = r_xram_rsp_victim_nline.read(); r_xram_rsp_to_xram_cmd_trdid = r_xram_rsp_trt_index.read(); for(size_t i=0; i(m_words,0), std::vector(m_words,0)); r_llsc_fsm = LLSC_XRAM_REQ; } break; } /////////////////// case LLSC_XRAM_REQ: // request the XRAM_CMD FSM to tetch the missing line { if ( !r_llsc_to_xram_cmd_req ) { r_llsc_to_xram_cmd_req = true; r_llsc_to_xram_cmd_trdid = r_llsc_trt_index.read(); r_llsc_to_xram_cmd_nline = m_nline[m_cmd_llsc_addr_fifo.read()]; if( m_cmd_llsc_sc_fifo.read() ) { r_llsc_fsm = SC_RSP_FALSE; } else { cmd_llsc_fifo_get = true; r_llsc_fsm = LLSC_IDLE; } } break; } } // end switch r_llsc_fsm ////////////////////////////////////////////////////////////////////////////// 
// INIT_CMD FSM ////////////////////////////////////////////////////////////////////////////// // The INIT_CMD fsm controls the VCI CMD initiator port, used to update // or invalidate cache lines in L1 caches. // It implements a round-robin priority between the two following requests: // - r_write_to_init_cmd_req : update request from WRITE FSM // - r_xram_rsp_to_init_cmd_req : invalidate request from XRAM_RSP FSM // The inval request is a single cell VCI write command containing the // index of the line to be invalidated. // The update request is a multi-cells VCI write command : The first cell // contains the index of the cache line to be updated. The second cell contains // the index of the first modified word in the line. The following cells // contain the data. /////////////////////////////////////////////////////////////////////////////// switch ( r_init_cmd_fsm.read() ) { //////////////////////// case INIT_CMD_UPDT_IDLE: // Invalidate requests have highest priority { if ( r_xram_rsp_to_init_cmd_req ) { r_init_cmd_fsm = INIT_CMD_INVAL_SEL; m_cpt_inval++; } else if ( r_write_to_init_cmd_req ) { r_init_cmd_fsm = INIT_CMD_UPDT_SEL; m_cpt_update++; } break; } ///////////////////////// case INIT_CMD_INVAL_IDLE: // Update requests have highest priority { if ( r_write_to_init_cmd_req ) { r_init_cmd_fsm = INIT_CMD_UPDT_SEL; m_cpt_update++; } else if ( r_xram_rsp_to_init_cmd_req ) { r_init_cmd_fsm = INIT_CMD_INVAL_SEL; m_cpt_inval++; } break; } //////////////////////// case INIT_CMD_INVAL_SEL: // selects the next L1 cache { if (r_xram_rsp_to_init_cmd_copies.read() == 0) { // no more copies r_xram_rsp_to_init_cmd_req = false; r_init_cmd_fsm = INIT_CMD_INVAL_IDLE; } else { // select the first target copy_t copies = r_xram_rsp_to_init_cmd_copies.read(); copy_t mask = 0x1; for ( size_t i=0 ; i<8*sizeof(copy_t) ; i++ ) { if ( copies & mask ) { r_init_cmd_target = i; break; } mask = mask << 1; } // end for m_cpt_inval_mult++; r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 
r_xram_rsp_to_init_cmd_copies = copies & ~mask; } break; } //////////////////////// case INIT_CMD_INVAL_NLINE: // send the cache line index { if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_INVAL_SEL; break; } /////////////////////// case INIT_CMD_UPDT_SEL: // selects the next L1 cache { if (r_write_to_init_cmd_copies.read() == 0) { // no more copies r_write_to_init_cmd_req = false; r_init_cmd_fsm = INIT_CMD_UPDT_IDLE; } else { // select the first target copy_t copies = r_write_to_init_cmd_copies.read(); copy_t mask = 0x1; for ( size_t i=0 ; i<8*sizeof(copy_t) ; i++ ) { if ( copies & mask ) { r_init_cmd_target = i; break; } mask = mask << 1; } // end for r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; r_write_to_init_cmd_copies = copies & ~mask; r_init_cmd_cpt = 0; m_cpt_update_mult++; } break; } ///////////////////////// case INIT_CMD_UPDT_NLINE: // send the cache line index { if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_UPDT_INDEX; break; } ///////////////////////// case INIT_CMD_UPDT_INDEX: // send the first word index { if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_UPDT_DATA; break; } //////////////////////// case INIT_CMD_UPDT_DATA: // send the data { if ( p_vci_ini.cmdack ) { if ( r_init_cmd_cpt.read() == (r_write_to_init_cmd_count.read()-1) ) { r_init_cmd_fsm = INIT_CMD_UPDT_SEL; } else { r_init_cmd_cpt = r_init_cmd_cpt.read() + 1; } } break; } } // end switch r_init_cmd_fsm ///////////////////////////////////////////////////////////////////// // TGT_RSP FSM ///////////////////////////////////////////////////////////////////// // The TGT_RSP fsm sends the responses on the VCI target port // with a round robin priority between six requests : // - r_read_to_tgt_rsp_req // - r_write_to_tgt_rsp_req // - r_llsc_to_tgt_rsp_req // - r_cleanup_to_tgt_rsp_req // - r_init_rsp_to_tgt_rsp_req // - r_xram_rsp_to_tgt_rsp_req // The ordering is : read > write > llsc > cleanup > xram > init ///////////////////////////////////////////////////////////////////// switch ( 
r_tgt_rsp_fsm.read() ) { /////////////////////// case TGT_RSP_READ_IDLE: // write requests have the highest priority { if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; else if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; else if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; break; } //////////////////////// case TGT_RSP_WRITE_IDLE: // llsc requests have the highest priority { if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; else if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; else if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; break; } /////////////////////// case TGT_RSP_LLSC_IDLE: // cleanup requests have the highest priority { if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; else if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; else if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; break; } ////////////////////////// case TGT_RSP_CLEANUP_IDLE: // xram requests have the highest priority { if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; else if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; else if ( 
r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; break; } /////////////////////// case TGT_RSP_XRAM_IDLE: // init requests have the highest priority { if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; else if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; else if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; break; } /////////////////////// case TGT_RSP_INIT_IDLE: // read requests have the highest priority { if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; else if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; break; } /////////////////////// case TGT_RSP_READ_TEST: // test if word or cache line { bool line = true; size_t index; for ( size_t i=0; i< m_words ; i++ ) { line = line && r_read_to_tgt_rsp_val[i]; if ( r_read_to_tgt_rsp_val[i] ) index = i; } if ( line ) { r_tgt_rsp_cpt = 0; r_tgt_rsp_fsm = TGT_RSP_READ_LINE; } else { r_tgt_rsp_cpt = index; r_tgt_rsp_fsm = TGT_RSP_READ_WORD; } break; } /////////////////////// case TGT_RSP_READ_WORD: // send one word response { if ( p_vci_tgt.rspack ) { r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; r_read_to_tgt_rsp_req = false; } break; } /////////////////////// case TGT_RSP_READ_LINE: // send one complete cache line { if ( p_vci_tgt.rspack ) { if ( r_tgt_rsp_cpt.read() == (m_words-1) ) { r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; r_read_to_tgt_rsp_req = false; } else { r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; } } break; } /////////////////// case TGT_RSP_WRITE: // send the write acknowledge { 
if ( p_vci_tgt.rspack ) { r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE; r_write_to_tgt_rsp_req = false; } break; } ////////////////// case TGT_RSP_LLSC: // send one atomic word response { if ( p_vci_tgt.rspack ) { r_tgt_rsp_fsm = TGT_RSP_LLSC_IDLE; r_llsc_to_tgt_rsp_req = false; } break; } ///////////////////// case TGT_RSP_CLEANUP: // send the cleanup acknowledge { if ( p_vci_tgt.rspack ) { r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE; r_cleanup_to_tgt_rsp_req = false; } break; } /////////////////////// case TGT_RSP_XRAM_TEST: // test if word or cache line { bool line = true; size_t index; for ( size_t i=0; i< m_words ; i++ ) { line = line && r_xram_rsp_to_tgt_rsp_val[i]; if ( r_xram_rsp_to_tgt_rsp_val[i] ) index = i; } if ( line ) { r_tgt_rsp_cpt = 0; r_tgt_rsp_fsm = TGT_RSP_XRAM_LINE; } else { r_tgt_rsp_cpt = index; r_tgt_rsp_fsm = TGT_RSP_XRAM_WORD; } break; } /////////////////////// case TGT_RSP_XRAM_WORD: // send one word response { if ( p_vci_tgt.rspack ) { r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; r_xram_rsp_to_tgt_rsp_req = false; } break; } /////////////////////// case TGT_RSP_XRAM_LINE: // send one complete cache line { if ( p_vci_tgt.rspack ) { if ( r_tgt_rsp_cpt.read() == (m_words-1) ) { r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; r_xram_rsp_to_tgt_rsp_req = false; } else { r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; } } break; } /////////////////// case TGT_RSP_INIT: // send the pending write acknowledge { if ( p_vci_tgt.rspack ) { r_tgt_rsp_fsm = TGT_RSP_INIT_IDLE; r_init_rsp_to_tgt_rsp_req = false; } break; } } // end switch tgt_rsp_fsm //////////////////////////////////////////////////////////////////////////////////// // ALLOC_UPT FSM //////////////////////////////////////////////////////////////////////////////////// // The ALLOC_UPT FSM allocates the access to the Update/Inval Table (UPT). // with a round robin priority between three FSMs : INIT_RSP > WRITE > XRAM_RSP // - The WRITE FSM initiates update transactions and sets new entry in UPT. 
// - The XRAM_RSP FSM initiates inval transactions and sets new entry in UPT. // - The INIT_RSP FSM complete those trasactions and erase the UPT entry. // The ressource is always allocated. ///////////////////////////////////////////////////////////////////////////////////// switch ( r_alloc_upt_fsm.read() ) { //////////////////////// case ALLOC_UPT_INIT_RSP: if ( (r_init_rsp_fsm.read() != INIT_RSP_UPT_LOCK) && (r_init_rsp_fsm.read() != INIT_RSP_UPT_CLEAR) ) { if (r_write_fsm.read()==WRITE_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_WRITE; else if (r_xram_rsp_fsm.read()==XRAM_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; } break; ///////////////////// case ALLOC_UPT_WRITE: if ( r_write_fsm.read() != WRITE_UPT_LOCK ) { if (r_xram_rsp_fsm.read()==XRAM_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; else if (r_init_rsp_fsm.read()==INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; } break; //////////////////////// case ALLOC_UPT_XRAM_RSP: if ( r_xram_rsp_fsm.read() != XRAM_RSP_UPT_LOCK ) { if (r_init_rsp_fsm.read()==INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; else if (r_write_fsm.read()==WRITE_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_WRITE; } break; } // end switch r_alloc_upt_fsm //////////////////////////////////////////////////////////////////////////////////// // ALLOC_DIR FSM //////////////////////////////////////////////////////////////////////////////////// // The ALLOC_DIR FSM allocates the access to the directory and // the data cache with a round robin priority between 5 user FSMs : // The cyclic ordering is READ > WRITE > LLSC > CLEANUP > XRAM_RSP // The ressource is always allocated. 
/////////////////////////////////////////////////////////////////////////////////////
    switch ( r_alloc_dir_fsm.read() ) {
      ////////////////////
      case ALLOC_DIR_READ:
        // READ keeps the directory while in READ_DIR_LOCK, and also while in
        // READ_TRT_LOCK until the TRT is granted to READ (dir + TRT must be
        // held together for a miss); then grant with rotating priority.
        if ( ( (r_read_fsm.read() != READ_DIR_LOCK) &&
               (r_read_fsm.read() != READ_TRT_LOCK) )
          || ( (r_read_fsm.read() == READ_TRT_LOCK) &&
               (r_alloc_trt_fsm.read() == ALLOC_TRT_READ) ) )
        {
          if      (r_write_fsm.read()==WRITE_DIR_LOCK)          r_alloc_dir_fsm = ALLOC_DIR_WRITE;
          else if ((r_llsc_fsm.read()==LL_DIR_LOCK) ||
                   (r_llsc_fsm.read()==SC_DIR_LOCK))            r_alloc_dir_fsm = ALLOC_DIR_LLSC;
          else if (r_cleanup_fsm.read()==CLEANUP_DIR_LOCK)      r_alloc_dir_fsm = ALLOC_DIR_CLEANUP;
          else if (r_xram_rsp_fsm.read()==XRAM_RSP_DIR_LOCK)    r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP;
        }
        break;
      /////////////////////
      case ALLOC_DIR_WRITE:
        // WRITE holds the directory through DIR_LOCK, DIR_HIT_READ and
        // TRT_LOCK (until the TRT is granted to WRITE).
        if ( ( (r_write_fsm.read() != WRITE_DIR_LOCK) &&
               (r_write_fsm.read() != WRITE_TRT_LOCK) &&
               (r_write_fsm.read() != WRITE_DIR_HIT_READ) )
          || ( (r_write_fsm.read() == WRITE_TRT_LOCK) &&
               (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) ) )
        {
          if      ((r_llsc_fsm.read()==LL_DIR_LOCK) ||
                   (r_llsc_fsm.read()==SC_DIR_LOCK))            r_alloc_dir_fsm = ALLOC_DIR_LLSC;
          else if (r_cleanup_fsm.read()==CLEANUP_DIR_LOCK)      r_alloc_dir_fsm = ALLOC_DIR_CLEANUP;
          else if (r_xram_rsp_fsm.read()==XRAM_RSP_DIR_LOCK)    r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP;
          else if (r_read_fsm.read()==READ_DIR_LOCK)            r_alloc_dir_fsm = ALLOC_DIR_READ;
        }
        break;
      ////////////////////
      case ALLOC_DIR_LLSC:
        // LLSC holds the directory through both LL and SC lock/hit states,
        // and through LLSC_TRT_LOCK until the TRT is granted to LLSC.
        if ( ( (r_llsc_fsm.read() != LL_DIR_LOCK) &&
               (r_llsc_fsm.read() != LL_DIR_HIT ) &&
               (r_llsc_fsm.read() != SC_DIR_LOCK) &&
               (r_llsc_fsm.read() != SC_DIR_HIT ) &&
               (r_llsc_fsm.read() != LLSC_TRT_LOCK ) )
          || ( (r_llsc_fsm.read() == LLSC_TRT_LOCK ) &&
               (r_alloc_trt_fsm.read() == ALLOC_TRT_LLSC) ) )
        {
          if      (r_cleanup_fsm.read()==CLEANUP_DIR_LOCK)      r_alloc_dir_fsm = ALLOC_DIR_CLEANUP;
          else if (r_xram_rsp_fsm.read()==XRAM_RSP_DIR_LOCK)    r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP;
          else if (r_read_fsm.read()==READ_DIR_LOCK)            r_alloc_dir_fsm = ALLOC_DIR_READ;
          else if (r_write_fsm.read()==WRITE_DIR_LOCK)          r_alloc_dir_fsm = ALLOC_DIR_WRITE;
        }
        break;
      ///////////////////////
      case ALLOC_DIR_CLEANUP:
        if ( (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) )
        {
          if      (r_xram_rsp_fsm.read()==XRAM_RSP_DIR_LOCK)    r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP;
          else if (r_read_fsm.read()==READ_DIR_LOCK)            r_alloc_dir_fsm = ALLOC_DIR_READ;
          else if (r_write_fsm.read()==WRITE_DIR_LOCK)          r_alloc_dir_fsm = ALLOC_DIR_WRITE;
          else if ((r_llsc_fsm.read()==LL_DIR_LOCK) ||
                   (r_llsc_fsm.read()==SC_DIR_LOCK))            r_alloc_dir_fsm = ALLOC_DIR_LLSC;
        }
        break;
      ////////////////////////
      case ALLOC_DIR_XRAM_RSP:
        // XRAM_RSP keeps the directory through the TRT_COPY state as well
        // (victim line selection touches both directory and TRT).
        if ( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) &&
             (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) )
        {
          if      (r_read_fsm.read()==READ_DIR_LOCK)            r_alloc_dir_fsm = ALLOC_DIR_READ;
          else if (r_write_fsm.read()==WRITE_DIR_LOCK)          r_alloc_dir_fsm = ALLOC_DIR_WRITE;
          else if ((r_llsc_fsm.read()==LL_DIR_LOCK) ||
                   (r_llsc_fsm.read()==SC_DIR_LOCK))            r_alloc_dir_fsm = ALLOC_DIR_LLSC;
          else if (r_cleanup_fsm.read()==CLEANUP_DIR_LOCK)      r_alloc_dir_fsm = ALLOC_DIR_CLEANUP;
        }
        break;
    } // end switch alloc_dir_fsm
    ////////////////////////////////////////////////////////////////////////////////////
    //          ALLOC_TRT FSM
    ////////////////////////////////////////////////////////////////////////////////////
    // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer)
    // with a round robin priority between 4 user FSMs :
    // The cyclic priority is READ > WRITE > LLSC > XRAM_RSP
    // The resource is always allocated.
///////////////////////////////////////////////////////////////////////////////////
    switch (r_alloc_trt_fsm) {
      ////////////////////
      case ALLOC_TRT_READ:
        if ( r_read_fsm.read() != READ_TRT_LOCK )
        {
          if      (r_write_fsm.read()==WRITE_TRT_LOCK)            r_alloc_trt_fsm = ALLOC_TRT_WRITE;
          else if (r_llsc_fsm.read()==LLSC_TRT_LOCK)              r_alloc_trt_fsm = ALLOC_TRT_LLSC;
          else if (r_xram_rsp_fsm.read()==XRAM_RSP_TRT_COPY)      r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
          else if ((r_ixr_rsp_fsm.read()==IXR_RSP_TRT_ERASE) ||
                   (r_ixr_rsp_fsm.read()==IXR_RSP_TRT_READ))      r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;
        }
        break;
      /////////////////////
      case ALLOC_TRT_WRITE:
        if ( r_write_fsm.read() != WRITE_TRT_LOCK )
        {
          if      (r_llsc_fsm.read()==LLSC_TRT_LOCK)              r_alloc_trt_fsm = ALLOC_TRT_LLSC;
          else if (r_xram_rsp_fsm.read()==XRAM_RSP_TRT_COPY)      r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
          else if ((r_ixr_rsp_fsm.read()==IXR_RSP_TRT_ERASE) ||
                   (r_ixr_rsp_fsm.read()==IXR_RSP_TRT_READ))      r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;
          else if (r_read_fsm.read()==READ_TRT_LOCK)              r_alloc_trt_fsm = ALLOC_TRT_READ;
        }
        break;
      ////////////////////
      case ALLOC_TRT_LLSC:
        if ( r_llsc_fsm.read() != LLSC_TRT_LOCK )
        {
          if      (r_xram_rsp_fsm.read()==XRAM_RSP_TRT_COPY)      r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
          else if ((r_ixr_rsp_fsm.read()==IXR_RSP_TRT_ERASE) ||
                   (r_ixr_rsp_fsm.read()==IXR_RSP_TRT_READ))      r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;
          else if (r_read_fsm.read()==READ_TRT_LOCK)              r_alloc_trt_fsm = ALLOC_TRT_READ;
          else if (r_write_fsm.read()==WRITE_TRT_LOCK)            r_alloc_trt_fsm = ALLOC_TRT_WRITE;
        }
        break;
      ////////////////////////
      case ALLOC_TRT_XRAM_RSP:
        // XRAM_RSP keeps the TRT through DIR_UPDT as well (the TRT entry is
        // still being consulted while the directory is updated).
        if ( (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) &&
             (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) )
        {
          if      ((r_ixr_rsp_fsm.read()==IXR_RSP_TRT_ERASE) ||
                   (r_ixr_rsp_fsm.read()==IXR_RSP_TRT_READ))      r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;
          else if (r_read_fsm.read()==READ_TRT_LOCK)              r_alloc_trt_fsm = ALLOC_TRT_READ;
          else if (r_write_fsm.read()==WRITE_TRT_LOCK)            r_alloc_trt_fsm = ALLOC_TRT_WRITE;
          else if (r_llsc_fsm.read()==LLSC_TRT_LOCK)              r_alloc_trt_fsm = ALLOC_TRT_LLSC;
        }
        break;
      ////////////////////////
      case ALLOC_TRT_IXR_RSP:
        if ( (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) &&
             (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ) )
        {
          if      (r_read_fsm.read()==READ_TRT_LOCK)              r_alloc_trt_fsm = ALLOC_TRT_READ;
          else if (r_write_fsm.read()==WRITE_TRT_LOCK)            r_alloc_trt_fsm = ALLOC_TRT_WRITE;
          else if (r_llsc_fsm.read()==LLSC_TRT_LOCK)              r_alloc_trt_fsm = ALLOC_TRT_LLSC;
          else if (r_xram_rsp_fsm.read()==XRAM_RSP_TRT_COPY)      r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
        }
        break;
    } // end switch alloc_trt_fsm
    ////////////////////////////////////////////////////////////////////////////////////
    //          TGT_CMD to READ FIFO
    ////////////////////////////////////////////////////////////////////////////////////
    // The put/get flags are computed by the TGT_CMD / READ FSMs this cycle;
    // all five FIFOs advance in lockstep so one command stays one entry.
    if ( cmd_read_fifo_put ) {
      if ( cmd_read_fifo_get ) {   // simultaneous put and get
        m_cmd_read_addr_fifo.put_and_get(p_vci_tgt.address.read());
        m_cmd_read_word_fifo.put_and_get((p_vci_tgt.plen.read() == 4));  // plen==4 => single-word read
        m_cmd_read_srcid_fifo.put_and_get(p_vci_tgt.srcid.read());
        m_cmd_read_trdid_fifo.put_and_get(p_vci_tgt.trdid.read());
        m_cmd_read_pktid_fifo.put_and_get(p_vci_tgt.pktid.read());
      } else {                     // put only
        m_cmd_read_addr_fifo.simple_put(p_vci_tgt.address.read());
        m_cmd_read_word_fifo.simple_put((p_vci_tgt.plen.read() == 4));
        m_cmd_read_srcid_fifo.simple_put(p_vci_tgt.srcid.read());
        m_cmd_read_trdid_fifo.simple_put(p_vci_tgt.trdid.read());
        m_cmd_read_pktid_fifo.simple_put(p_vci_tgt.pktid.read());
      }
    } else {
      if ( cmd_read_fifo_get ) {   // get only
        m_cmd_read_addr_fifo.simple_get();
        m_cmd_read_word_fifo.simple_get();
        m_cmd_read_srcid_fifo.simple_get();
        m_cmd_read_trdid_fifo.simple_get();
        m_cmd_read_pktid_fifo.simple_get();
      }
    }
    /////////////////////////////////////////////////////////////////////
    //          TGT_CMD to WRITE FIFO
    /////////////////////////////////////////////////////////////////////
    if ( cmd_write_fifo_put ) {
      if ( cmd_write_fifo_get ) {  // simultaneous put and get
        m_cmd_write_addr_fifo.put_and_get(p_vci_tgt.address.read());
        m_cmd_write_eop_fifo.put_and_get(p_vci_tgt.eop.read());
m_cmd_write_srcid_fifo.put_and_get(p_vci_tgt.srcid.read());
        m_cmd_write_trdid_fifo.put_and_get(p_vci_tgt.trdid.read());
        m_cmd_write_pktid_fifo.put_and_get(p_vci_tgt.pktid.read());
        m_cmd_write_data_fifo.put_and_get(p_vci_tgt.wdata.read());
        m_cmd_write_be_fifo.put_and_get(p_vci_tgt.be.read());
      } else {                     // put only
        m_cmd_write_addr_fifo.simple_put(p_vci_tgt.address.read());
        m_cmd_write_eop_fifo.simple_put(p_vci_tgt.eop.read());
        m_cmd_write_srcid_fifo.simple_put(p_vci_tgt.srcid.read());
        m_cmd_write_trdid_fifo.simple_put(p_vci_tgt.trdid.read());
        m_cmd_write_pktid_fifo.simple_put(p_vci_tgt.pktid.read());
        m_cmd_write_data_fifo.simple_put(p_vci_tgt.wdata.read());
        m_cmd_write_be_fifo.simple_put(p_vci_tgt.be.read());
      }
    } else {
      if ( cmd_write_fifo_get ) {  // get only
        m_cmd_write_addr_fifo.simple_get();
        m_cmd_write_eop_fifo.simple_get();
        m_cmd_write_srcid_fifo.simple_get();
        m_cmd_write_trdid_fifo.simple_get();
        m_cmd_write_pktid_fifo.simple_get();
        m_cmd_write_data_fifo.simple_get();
        m_cmd_write_be_fifo.simple_get();
      }
    }
    ////////////////////////////////////////////////////////////////////////////////////
    //          TGT_CMD to LLSC FIFO
    ////////////////////////////////////////////////////////////////////////////////////
    if ( cmd_llsc_fifo_put ) {
      if ( cmd_llsc_fifo_get ) {   // simultaneous put and get
        m_cmd_llsc_addr_fifo.put_and_get(p_vci_tgt.address.read());
        // true => Store Conditional, false => Linked Load
        m_cmd_llsc_sc_fifo.put_and_get(p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND);
        m_cmd_llsc_srcid_fifo.put_and_get(p_vci_tgt.srcid.read());
        m_cmd_llsc_trdid_fifo.put_and_get(p_vci_tgt.trdid.read());
        m_cmd_llsc_pktid_fifo.put_and_get(p_vci_tgt.pktid.read());
        m_cmd_llsc_wdata_fifo.put_and_get(p_vci_tgt.wdata.read());
      } else {                     // put only
        m_cmd_llsc_addr_fifo.simple_put(p_vci_tgt.address.read());
        m_cmd_llsc_sc_fifo.simple_put(p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND);
        m_cmd_llsc_srcid_fifo.simple_put(p_vci_tgt.srcid.read());
        m_cmd_llsc_trdid_fifo.simple_put(p_vci_tgt.trdid.read());
        m_cmd_llsc_pktid_fifo.simple_put(p_vci_tgt.pktid.read());
        m_cmd_llsc_wdata_fifo.simple_put(p_vci_tgt.wdata.read());
      }
    } else {
      if ( cmd_llsc_fifo_get ) {   // get only
        m_cmd_llsc_addr_fifo.simple_get();
        m_cmd_llsc_sc_fifo.simple_get();
        m_cmd_llsc_srcid_fifo.simple_get();
        m_cmd_llsc_trdid_fifo.simple_get();
        m_cmd_llsc_pktid_fifo.simple_get();
        m_cmd_llsc_wdata_fifo.simple_get();
      }
    }
    ////////////////////////////////////////////////////////////////////////////////////
    //          TGT_CMD to CLEANUP FIFO
    ////////////////////////////////////////////////////////////////////////////////////
    // For a cleanup command the line number travels in the wdata field.
    if ( cmd_cleanup_fifo_put ) {
      if ( cmd_cleanup_fifo_get ) {  // simultaneous put and get
        m_cmd_cleanup_srcid_fifo.put_and_get(p_vci_tgt.srcid.read());
        m_cmd_cleanup_trdid_fifo.put_and_get(p_vci_tgt.trdid.read());
        m_cmd_cleanup_pktid_fifo.put_and_get(p_vci_tgt.pktid.read());
        m_cmd_cleanup_nline_fifo.put_and_get(p_vci_tgt.wdata.read());
      } else {                       // put only
        m_cmd_cleanup_srcid_fifo.simple_put(p_vci_tgt.srcid.read());
        m_cmd_cleanup_trdid_fifo.simple_put(p_vci_tgt.trdid.read());
        m_cmd_cleanup_pktid_fifo.simple_put(p_vci_tgt.pktid.read());
        m_cmd_cleanup_nline_fifo.simple_put(p_vci_tgt.wdata.read());
      }
    } else {
      if ( cmd_cleanup_fifo_get ) {  // get only
        m_cmd_cleanup_srcid_fifo.simple_get();
        m_cmd_cleanup_trdid_fifo.simple_get();
        m_cmd_cleanup_pktid_fifo.simple_get();
        m_cmd_cleanup_nline_fifo.simple_get();
      }
    }
    m_cpt_cycles++;   // simulation cycle counter (statistics)
  } // end transition()

  /////////////////////////////
  // genMoore(): combinational computation of all output port values from the
  // registered FSM states — no register is written here.
  tmpl(void)::genMoore()
  /////////////////////////////
  {
    ////////////////////////////////////////////////////////////
    // Command signals on the p_vci_ixr port (to external RAM)
    ////////////////////////////////////////////////////////////
    // Fields that are identical for every XRAM command:
    p_vci_ixr.be      = 0xF;
    p_vci_ixr.pktid   = 0;
    p_vci_ixr.srcid   = m_srcid_ixr;
    p_vci_ixr.cons    = false;
    p_vci_ixr.wrap    = false;
    p_vci_ixr.contig  = true;
    p_vci_ixr.clen    = 0;
    p_vci_ixr.cfixed  = false;
    if ( r_xram_cmd_fsm.read() == XRAM_CMD_READ_NLINE ) {
      // Read miss: fetch one full cache line (m_words words of 4 bytes).
      p_vci_ixr.cmd     = vci_param::CMD_READ;
      p_vci_ixr.cmdval  = true;
      p_vci_ixr.address = (r_read_to_xram_cmd_nline.read()*m_words*4);
      p_vci_ixr.plen    = m_words*4;
      p_vci_ixr.wdata   = 0x00000000;
p_vci_ixr.trdid   = r_read_to_xram_cmd_trdid.read();
      p_vci_ixr.eop     = true;
    } else if ( r_xram_cmd_fsm.read() == XRAM_CMD_LLSC_NLINE ) {
      // LL/SC miss: fetch one full cache line.
      p_vci_ixr.cmd     = vci_param::CMD_READ;
      p_vci_ixr.cmdval  = true;
      p_vci_ixr.address = (r_llsc_to_xram_cmd_nline.read()*m_words*4);
      p_vci_ixr.plen    = m_words*4;
      p_vci_ixr.wdata   = 0x00000000;
      p_vci_ixr.trdid   = r_llsc_to_xram_cmd_trdid.read();
      p_vci_ixr.eop     = true;
    } else if ( r_xram_cmd_fsm.read() == XRAM_CMD_WRITE_NLINE ) {
      // Write miss: also a CMD_READ — the missing line is fetched from XRAM
      // first (write-allocate, presumably — confirm against the WRITE FSM).
      p_vci_ixr.cmd     = vci_param::CMD_READ;
      p_vci_ixr.cmdval  = true;
      p_vci_ixr.address = (r_write_to_xram_cmd_nline.read()*m_words*4);
      p_vci_ixr.plen    = m_words*4;
      p_vci_ixr.wdata   = 0x00000000;
      p_vci_ixr.trdid   = r_write_to_xram_cmd_trdid.read();
      p_vci_ixr.eop     = true;
    } else if ( r_xram_cmd_fsm.read() == XRAM_CMD_XRAM_DATA ) {
      // Victim line write-back: one data word per cycle, indexed by
      // r_xram_cmd_cpt; eop on the last word of the line.
      p_vci_ixr.cmd     = vci_param::CMD_WRITE;
      p_vci_ixr.cmdval  = true;
      p_vci_ixr.address = ((r_xram_rsp_to_xram_cmd_nline.read()*m_words+r_xram_cmd_cpt.read())*4);
      p_vci_ixr.plen    = m_words*4;
      p_vci_ixr.wdata   = r_xram_rsp_to_xram_cmd_data[r_xram_cmd_cpt.read()].read();
      p_vci_ixr.trdid   = r_xram_rsp_to_xram_cmd_trdid.read();
      p_vci_ixr.eop     = (r_xram_cmd_cpt == (m_words-1));
    } else {
      // No XRAM command pending this cycle.
      p_vci_ixr.cmdval  = false;
      p_vci_ixr.address = 0;
      p_vci_ixr.plen    = 0;
      p_vci_ixr.wdata   = 0;
      p_vci_ixr.trdid   = 0;
      p_vci_ixr.eop     = false;
    }
    ////////////////////////////////////////////////////
    // Response signals on the p_vci_ixr port
    ////////////////////////////////////////////////////
    // Accept an XRAM response only when the IXR_RSP FSM can consume it:
    // either it holds the TRT and is reading data, or it is acknowledging.
    if ( ((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) &&
          (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) ||
         (r_ixr_rsp_fsm.read() == IXR_RSP_ACK) )
      p_vci_ixr.rspack = true;
    else
      p_vci_ixr.rspack = false;
    ////////////////////////////////////////////////////
    // Command signals on the p_vci_tgt port
    ////////////////////////////////////////////////////
    // cmdack reflects the write-availability of the FIFO that the current
    // TGT_CMD state feeds (flow control towards the interconnect).
    switch ((tgt_cmd_fsm_state_e)r_tgt_cmd_fsm.read()) {
      case TGT_CMD_IDLE:
        p_vci_tgt.cmdack = false;
        break;
      case TGT_CMD_READ:
        p_vci_tgt.cmdack = m_cmd_read_addr_fifo.wok();
        break;
      case TGT_CMD_READ_EOP:
        p_vci_tgt.cmdack = true;
        break;
      case TGT_CMD_WRITE:
        p_vci_tgt.cmdack = m_cmd_write_addr_fifo.wok();
        break;
      case TGT_CMD_ATOMIC:
        p_vci_tgt.cmdack = m_cmd_llsc_addr_fifo.wok();
        break;
      case TGT_CMD_CLEANUP:
        p_vci_tgt.cmdack = m_cmd_cleanup_nline_fifo.wok();
        break;
      default:
        p_vci_tgt.cmdack = false;
        break;
    }
    ////////////////////////////////////////////////////
    // Response signals on the p_vci_tgt port
    ////////////////////////////////////////////////////
    switch ( r_tgt_rsp_fsm.read() ) {
      // No response in any idle/test state: drive all fields inactive.
      case TGT_RSP_READ_IDLE:
      case TGT_RSP_WRITE_IDLE:
      case TGT_RSP_LLSC_IDLE:
      case TGT_RSP_CLEANUP_IDLE:
      case TGT_RSP_XRAM_IDLE:
      case TGT_RSP_INIT_IDLE:
      case TGT_RSP_READ_TEST:
      case TGT_RSP_XRAM_TEST:
        p_vci_tgt.rspval = false;
        p_vci_tgt.rsrcid = 0;
        p_vci_tgt.rdata  = 0;
        p_vci_tgt.rpktid = 0;
        p_vci_tgt.rtrdid = 0;
        p_vci_tgt.rerror = 0;
        p_vci_tgt.reop   = false;
        break;
      case TGT_RSP_READ_LINE:   // one word of a full-line read response
        p_vci_tgt.rspval = true;
        p_vci_tgt.rdata  = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
        p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read();
        p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read();
        p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror = 0;
        p_vci_tgt.reop   = (r_tgt_rsp_cpt.read()==(m_words-1));
        break;
      case TGT_RSP_READ_WORD:   // single-word read response
        p_vci_tgt.rspval = true;
        p_vci_tgt.rdata  = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
        p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read();
        p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read();
        p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror = 0;
        p_vci_tgt.reop   = true;
        break;
      case TGT_RSP_WRITE:       // write acknowledge (no data)
        p_vci_tgt.rspval = true;
        p_vci_tgt.rdata  = 0;
        p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read();
        p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read();
        p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror = 0;
        p_vci_tgt.reop   = true;
        break;
      case TGT_RSP_CLEANUP:     // cleanup acknowledge (no data)
        p_vci_tgt.rspval = true;
        p_vci_tgt.rdata  = 0;
        p_vci_tgt.rsrcid = r_cleanup_to_tgt_rsp_srcid.read();
        p_vci_tgt.rtrdid = r_cleanup_to_tgt_rsp_trdid.read();
        p_vci_tgt.rpktid =
r_cleanup_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror = 0;
        p_vci_tgt.reop   = true;
        break;
      case TGT_RSP_LLSC:        // atomic (LL/SC) single-word response
        p_vci_tgt.rspval = true;
        p_vci_tgt.rdata  = r_llsc_to_tgt_rsp_data.read();
        p_vci_tgt.rsrcid = r_llsc_to_tgt_rsp_srcid.read();
        p_vci_tgt.rtrdid = r_llsc_to_tgt_rsp_trdid.read();
        p_vci_tgt.rpktid = r_llsc_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror = 0;
        p_vci_tgt.reop   = true;
        break;
      case TGT_RSP_XRAM_LINE:   // one word of a line fetched from XRAM
        p_vci_tgt.rspval = true;
        p_vci_tgt.rdata  = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
        p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read();
        p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read();
        p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror = 0;
        p_vci_tgt.reop   = (r_tgt_rsp_cpt.read()==(m_words-1));
        break;
      case TGT_RSP_XRAM_WORD:   // single word fetched from XRAM
        p_vci_tgt.rspval = true;
        p_vci_tgt.rdata  = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
        p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read();
        p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read();
        p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror = 0;
        p_vci_tgt.reop   = true;
        break;
      case TGT_RSP_INIT:        // pending write acknowledge (after update/inval done)
        p_vci_tgt.rspval = true;
        p_vci_tgt.rdata  = 0;
        p_vci_tgt.rsrcid = r_init_rsp_to_tgt_rsp_srcid.read();
        p_vci_tgt.rtrdid = r_init_rsp_to_tgt_rsp_trdid.read();
        p_vci_tgt.rpktid = r_init_rsp_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror = 0;
        p_vci_tgt.reop   = true;
        break;
    } // end switch r_tgt_rsp_fsm
    ///////////////////////////////////////////////////
    // Command signals on the p_vci_ini port
    // (coherence commands — updates / invalidations — towards the L1 caches;
    //  the target address comes from m_coherence_table)
    ///////////////////////////////////////////////////
    // Fields identical for every coherence command:
    p_vci_ini.cmd     = vci_param::CMD_WRITE;
    p_vci_ini.srcid   = m_srcid_ini;
    p_vci_ini.pktid   = 0;
    p_vci_ini.cons    = true;
    p_vci_ini.wrap    = false;
    p_vci_ini.contig  = false;
    p_vci_ini.clen    = 0;
    p_vci_ini.cfixed  = false;
    switch ( r_init_cmd_fsm.read() ) {
      // No command in idle / target-selection states.
      case INIT_CMD_UPDT_IDLE:
      case INIT_CMD_INVAL_IDLE:
      case INIT_CMD_UPDT_SEL:
      case INIT_CMD_INVAL_SEL:
        p_vci_ini.cmdval  = false;
        p_vci_ini.address = 0;
        p_vci_ini.wdata   = 0;
        p_vci_ini.be      = 0;
        p_vci_ini.plen    = 0;
        p_vci_ini.trdid   = 0;
        p_vci_ini.eop     = false;
        break;
      case INIT_CMD_INVAL_NLINE:   // single-flit invalidate: wdata = line number
        p_vci_ini.cmdval  = true;
        p_vci_ini.address = m_coherence_table[r_init_cmd_target.read()];
        p_vci_ini.wdata   = r_xram_rsp_to_init_cmd_nline.read();
        p_vci_ini.be      = 0xF;
        p_vci_ini.plen    = 4;
        p_vci_ini.trdid   = r_xram_rsp_to_init_cmd_trdid.read();
        p_vci_ini.eop     = true;
        break;
      case INIT_CMD_UPDT_NLINE:    // update, first flit: line number
        // Update packet = nline + index + count data words,
        // hence plen = 4 * (count + 2); address offset +4 selects the
        // update (vs invalidate) entry of the coherence target.
        p_vci_ini.cmdval  = true;
        p_vci_ini.address = m_coherence_table[r_init_cmd_target.read()] + 4;
        p_vci_ini.wdata   = r_write_to_init_cmd_nline.read();
        p_vci_ini.be      = 0xF;
        p_vci_ini.plen    = 4 * (r_write_to_init_cmd_count.read() + 2);
        p_vci_ini.trdid   = r_write_to_init_cmd_trdid.read();
        p_vci_ini.eop     = false;
        break;
      case INIT_CMD_UPDT_INDEX:    // update, second flit: word index in the line
        p_vci_ini.cmdval  = true;
        p_vci_ini.address = m_coherence_table[r_init_cmd_target.read()] + 4;
        p_vci_ini.wdata   = r_write_to_init_cmd_index.read();
        p_vci_ini.be      = 0xF;
        p_vci_ini.plen    = 4 * (r_write_to_init_cmd_count.read() + 2);
        p_vci_ini.trdid   = r_write_to_init_cmd_trdid.read();
        p_vci_ini.eop     = false;
        break;
      case INIT_CMD_UPDT_DATA:     // update, data flits: one word per cycle
        p_vci_ini.cmdval  = true;
        p_vci_ini.address = m_coherence_table[r_init_cmd_target.read()] + 4;
        p_vci_ini.wdata   = r_write_to_init_cmd_data[r_init_cmd_cpt.read() +
                            r_write_to_init_cmd_index.read()].read();
        // Byte-enable masks out the words that were not written.
        if(r_write_to_init_cmd_we[r_init_cmd_cpt.read() +
           r_write_to_init_cmd_index.read()].read())
          p_vci_ini.be = 0xF;
        else
          p_vci_ini.be = 0x0;
        p_vci_ini.plen    = 4 * (r_write_to_init_cmd_count.read() + 2);
        p_vci_ini.trdid   = r_write_to_init_cmd_trdid.read();
        p_vci_ini.eop     = ( r_init_cmd_cpt.read() == (r_write_to_init_cmd_count.read()-1) );
        break;
    } // end switch r_init_cmd_fsm
    //////////////////////////////////////////////////////
    // Response signals on the p_vci_ini port
    //////////////////////////////////////////////////////
    // Coherence responses are accepted only when INIT_RSP is idle.
    if ( r_init_rsp_fsm.read() == INIT_RSP_IDLE )
      p_vci_ini.rspack = true;
    else
      p_vci_ini.rspack = false;
  } // end genMoore()

}} // end name space