/* -*- c++ -*-
 * File       : vci_mem_cache_v4.cpp
 * Date       : 30/10/2008
 * Copyright  : UPMC / LIP6
 * Authors    : Alain Greiner / Eric Guthmuller
 *
 * SOCLIB_LGPL_HEADER_BEGIN
 *
 * This file is part of SoCLib, GNU LGPLv2.1.
 *
 * SoCLib is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; version 2.1 of the License.
 *
 * SoCLib is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with SoCLib; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * SOCLIB_LGPL_HEADER_END
 *
 * Maintainers: alain eric.guthmuller@polytechnique.edu
 */

#include "../include/vci_mem_cache_v4.h"

////// debug services ///////////////////////////////////////////////////////
// All debug messages are conditioned by two variables:
//   - compile time   : DEBUG_MEMC_***  : defined below
//   - execution time : m_debug_***     : defined by constructor arguments
//       m_debug_* = (m_debug_ok) and (m_cpt_cycle > m_debug_start_cycle)
/////////////////////////////////////////////////////////////////////////////////

#define DEBUG_MEMC_GLOBAL    0 // synthetic trace of all FSMs
#define DEBUG_MEMC_READ      1 // detailed trace of READ FSM
#define DEBUG_MEMC_WRITE     1 // detailed trace of WRITE FSM
#define DEBUG_MEMC_SC        1 // detailed trace of SC FSM
#define DEBUG_MEMC_IXR_CMD   1 // detailed trace of IXR_CMD FSM
#define DEBUG_MEMC_IXR_RSP   1 // detailed trace of IXR_RSP FSM
#define DEBUG_MEMC_XRAM_RSP  1 // detailed trace of XRAM_RSP FSM
#define DEBUG_MEMC_INIT_CMD  1 // detailed trace of INIT_CMD FSM
#define DEBUG_MEMC_INIT_RSP  1 // detailed trace of INIT_RSP FSM
#define DEBUG_MEMC_TGT_CMD   1 // detailed trace of
// TGT_CMD FSM
#define DEBUG_MEMC_TGT_RSP   1 // detailed trace of TGT_RSP FSM
#define DEBUG_MEMC_CLEANUP   1 // detailed trace of CLEANUP FSM

#define RANDOMIZE_SC 1

namespace soclib { namespace caba {

  // Human-readable state names for every FSM of the memory cache,
  // indexed by the corresponding state register value.
  // Used by print_trace() and the debug messages in transition().
  const char *tgt_cmd_fsm_str[] = {
    "TGT_CMD_IDLE",
    "TGT_CMD_READ",
    "TGT_CMD_WRITE",
    "TGT_CMD_ATOMIC",
  };
  const char *tgt_rsp_fsm_str[] = {
    "TGT_RSP_READ_IDLE",
    "TGT_RSP_WRITE_IDLE",
    "TGT_RSP_SC_IDLE",
    "TGT_RSP_XRAM_IDLE",
    "TGT_RSP_INIT_IDLE",
    "TGT_RSP_CLEANUP_IDLE",
    "TGT_RSP_READ",
    "TGT_RSP_WRITE",
    "TGT_RSP_SC",
    "TGT_RSP_XRAM",
    "TGT_RSP_INIT",
    "TGT_RSP_CLEANUP",
  };
  const char *init_cmd_fsm_str[] = {
    "INIT_CMD_INVAL_IDLE",
    "INIT_CMD_INVAL_NLINE",
    "INIT_CMD_XRAM_BRDCAST",
    "INIT_CMD_UPDT_IDLE",
    "INIT_CMD_WRITE_BRDCAST",
    "INIT_CMD_UPDT_NLINE",
    "INIT_CMD_UPDT_INDEX",
    "INIT_CMD_UPDT_DATA",
    "INIT_CMD_SC_UPDT_IDLE",
    "INIT_CMD_SC_BRDCAST",
    "INIT_CMD_SC_UPDT_NLINE",
    "INIT_CMD_SC_UPDT_INDEX",
    "INIT_CMD_SC_UPDT_DATA",
    "INIT_CMD_SC_UPDT_DATA_HIGH",
  };
  const char *init_rsp_fsm_str[] = {
    "INIT_RSP_IDLE",
    "INIT_RSP_UPT_LOCK",
    "INIT_RSP_UPT_CLEAR",
    "INIT_RSP_END",
  };
  const char *read_fsm_str[] = {
    "READ_IDLE",
    "READ_DIR_LOCK",
    "READ_DIR_HIT",
    "READ_HEAP_LOCK",
    "READ_HEAP_WRITE",
    "READ_HEAP_ERASE",
    "READ_HEAP_LAST",
    "READ_RSP",
    "READ_TRT_LOCK",
    "READ_TRT_SET",
    "READ_TRT_REQ",
  };
  const char *write_fsm_str[] = {
    "WRITE_IDLE",
    "WRITE_NEXT",
    "WRITE_DIR_LOCK",
    "WRITE_DIR_READ",
    "WRITE_DIR_HIT",
    "WRITE_UPT_LOCK",
    "WRITE_UPT_HEAP_LOCK",
    "WRITE_UPT_REQ",
    "WRITE_UPT_NEXT",
    "WRITE_UPT_DEC",
    "WRITE_RSP",
    "WRITE_MISS_TRT_LOCK",
    "WRITE_MISS_TRT_DATA",
    "WRITE_MISS_TRT_SET",
    "WRITE_MISS_XRAM_REQ",
    "WRITE_BC_TRT_LOCK",
    "WRITE_BC_UPT_LOCK",
    "WRITE_BC_DIR_INVAL",
    "WRITE_BC_CC_SEND",
    "WRITE_BC_XRAM_REQ",
    "WRITE_WAIT",
  };
  const char *ixr_rsp_fsm_str[] = {
    "IXR_RSP_IDLE",
    "IXR_RSP_ACK",
    "IXR_RSP_TRT_ERASE",
    "IXR_RSP_TRT_READ",
  };
  const char *xram_rsp_fsm_str[] = {
    "XRAM_RSP_IDLE",
    "XRAM_RSP_TRT_COPY",
    "XRAM_RSP_TRT_DIRTY",
    "XRAM_RSP_DIR_LOCK",
    "XRAM_RSP_DIR_UPDT",
    "XRAM_RSP_DIR_RSP",
    "XRAM_RSP_INVAL_LOCK",
"XRAM_RSP_INVAL_WAIT", "XRAM_RSP_INVAL", "XRAM_RSP_WRITE_DIRTY", "XRAM_RSP_HEAP_ERASE", "XRAM_RSP_HEAP_LAST", "XRAM_RSP_ERROR_ERASE", "XRAM_RSP_ERROR_RSP", }; const char *ixr_cmd_fsm_str[] = { "IXR_CMD_READ_IDLE", "IXR_CMD_WRITE_IDLE", "IXR_CMD_SC_IDLE", "IXR_CMD_XRAM_IDLE", "IXR_CMD_READ_NLINE", "IXR_CMD_WRITE_NLINE", "IXR_CMD_SC_NLINE", "IXR_CMD_XRAM_DATA", }; const char *sc_fsm_str[] = { "SC_IDLE", "SC_DIR_LOCK", "SC_DIR_HIT_READ", "SC_DIR_HIT_WRITE", "SC_UPT_LOCK", "SC_UPT_HEAP_LOCK", "SC_UPT_REQ", "SC_UPT_NEXT", "SC_BC_TRT_LOCK", "SC_BC_UPT_LOCK", "SC_BC_DIR_INVAL", "SC_BC_CC_SEND", "SC_BC_XRAM_REQ", "SC_RSP_FAIL", "SC_RSP_SUCCESS", "SC_MISS_TRT_LOCK", "SC_MISS_TRT_SET", "SC_MISS_XRAM_REQ", "SC_WAIT", }; const char *cleanup_fsm_str[] = { "CLEANUP_IDLE", "CLEANUP_DIR_LOCK", "CLEANUP_DIR_WRITE", "CLEANUP_HEAP_LOCK", "CLEANUP_HEAP_SEARCH", "CLEANUP_HEAP_CLEAN", "CLEANUP_HEAP_FREE", "CLEANUP_UPT_LOCK", "CLEANUP_UPT_WRITE", "CLEANUP_WRITE_RSP", "CLEANUP_RSP", }; const char *alloc_dir_fsm_str[] = { "ALLOC_DIR_READ", "ALLOC_DIR_WRITE", "ALLOC_DIR_SC", "ALLOC_DIR_CLEANUP", "ALLOC_DIR_XRAM_RSP", }; const char *alloc_trt_fsm_str[] = { "ALLOC_TRT_READ", "ALLOC_TRT_WRITE", "ALLOC_TRT_SC", "ALLOC_TRT_XRAM_RSP", "ALLOC_TRT_IXR_RSP", }; const char *alloc_upt_fsm_str[] = { "ALLOC_UPT_WRITE", "ALLOC_UPT_XRAM_RSP", "ALLOC_UPT_INIT_RSP", "ALLOC_UPT_CLEANUP", }; const char *alloc_heap_fsm_str[] = { "ALLOC_HEAP_READ", "ALLOC_HEAP_WRITE", "ALLOC_HEAP_SC", "ALLOC_HEAP_CLEANUP", "ALLOC_HEAP_XRAM_RSP", }; #define tmpl(x) template x VciMemCacheV4 using soclib::common::uint32_log2; //////////////////////////////// // Constructor //////////////////////////////// tmpl(/**/)::VciMemCacheV4( sc_module_name name, const soclib::common::MappingTable &mtp, const soclib::common::MappingTable &mtc, const soclib::common::MappingTable &mtx, const soclib::common::IntTab &vci_ixr_index, const soclib::common::IntTab &vci_ini_index, const soclib::common::IntTab &vci_tgt_index, const 
      soclib::common::IntTab &vci_tgt_index_cleanup,
      size_t nways,                  // number of ways per set
      size_t nsets,                  // number of cache sets
      size_t nwords,                 // number of words in cache line
      size_t heap_size,              // number of heap entries
      size_t transaction_tab_lines,  // number of TRT entries
      size_t update_tab_lines,       // number of UPT entries
      size_t debug_start_cycle,
      bool debug_ok)
      : soclib::caba::BaseModule(name),

      m_debug_start_cycle( debug_start_cycle),
      m_debug_ok ( debug_ok ),

      // Ports
      p_clk("clk"),
      p_resetn("resetn"),
      p_vci_tgt("vci_tgt"),
      p_vci_tgt_cleanup("vci_tgt_cleanup"),
      p_vci_ini("vci_ini"),
      p_vci_ixr("vci_ixr"),

      // Structural parameters and tables
      m_initiators( 1 << vci_param::S ),
      m_heap_size( heap_size ),
      m_ways( nways ),
      m_sets( nsets ),
      m_words( nwords ),
      m_srcid_ixr( mtx.indexForId(vci_ixr_index) ),
      m_srcid_ini( mtc.indexForId(vci_ini_index) ),
      m_seglist(mtp.getSegmentList(vci_tgt_index)),
      m_cseglist(mtc.getSegmentList(vci_tgt_index_cleanup)),
      m_transaction_tab_lines(transaction_tab_lines),
      m_transaction_tab( transaction_tab_lines, nwords ),
      m_update_tab_lines( update_tab_lines),
      m_update_tab( update_tab_lines ),
      m_cache_directory( nways, nsets, nwords, vci_param::N ),
      m_heap( m_heap_size ),

      // Address-field extractors: word index (m_x), set index (m_y),
      // tag (m_z) and cache-line number (m_nline)
#define L2 soclib::common::uint32_log2
      m_x( L2(m_words), 2),
      m_y( L2(m_sets), L2(m_words) + 2),
      m_z( vci_param::N - L2(m_sets) - L2(m_words) - 2, L2(m_sets) + L2(m_words) + 2),
      m_nline( vci_param::N - L2(m_words) - 2, L2(m_words) + 2),
#undef L2

      // FIFOs
      m_cmd_read_addr_fifo("m_cmd_read_addr_fifo", 4),
      m_cmd_read_length_fifo("m_cmd_read_length_fifo", 4),
      m_cmd_read_srcid_fifo("m_cmd_read_srcid_fifo", 4),
      m_cmd_read_trdid_fifo("m_cmd_read_trdid_fifo", 4),
      m_cmd_read_pktid_fifo("m_cmd_read_pktid_fifo", 4),

      m_cmd_write_addr_fifo("m_cmd_write_addr_fifo",8),
      m_cmd_write_eop_fifo("m_cmd_write_eop_fifo",8),
      m_cmd_write_srcid_fifo("m_cmd_write_srcid_fifo",8),
      m_cmd_write_trdid_fifo("m_cmd_write_trdid_fifo",8),
      m_cmd_write_pktid_fifo("m_cmd_write_pktid_fifo",8),
      m_cmd_write_data_fifo("m_cmd_write_data_fifo",8),
      m_cmd_write_be_fifo("m_cmd_write_be_fifo",8),

      m_cmd_sc_addr_fifo("m_cmd_sc_addr_fifo",4),
      m_cmd_sc_eop_fifo("m_cmd_sc_eop_fifo",4),
      m_cmd_sc_srcid_fifo("m_cmd_sc_srcid_fifo",4),
      m_cmd_sc_trdid_fifo("m_cmd_sc_trdid_fifo",4),
      m_cmd_sc_pktid_fifo("m_cmd_sc_pktid_fifo",4),
      m_cmd_sc_wdata_fifo("m_cmd_sc_wdata_fifo",4),

      // FSM state registers
      r_tgt_cmd_fsm("r_tgt_cmd_fsm"),

      m_nseg(0),
      m_ncseg(0),

      r_read_fsm("r_read_fsm"),

      r_write_fsm("r_write_fsm"),
      m_write_to_init_cmd_inst_fifo("m_write_to_init_cmd_inst_fifo",8),
      m_write_to_init_cmd_srcid_fifo("m_write_to_init_cmd_srcid_fifo",8),
#if L1_MULTI_CACHE
      m_write_to_init_cmd_cache_id_fifo("m_write_to_init_cmd_cache_id_fifo",8),
#endif

      r_init_rsp_fsm("r_init_rsp_fsm"),
      r_cleanup_fsm("r_cleanup_fsm"),

      r_sc_fsm("r_sc_fsm"),
      m_sc_to_init_cmd_inst_fifo("m_sc_to_init_cmd_inst_fifo",8),
      m_sc_to_init_cmd_srcid_fifo("m_sc_to_init_cmd_srcid_fifo",8),
#if L1_MULTI_CACHE
      m_sc_to_init_cmd_cache_id_fifo("m_sc_to_init_cmd_cache_id_fifo",8),
#endif

      r_ixr_rsp_fsm("r_ixr_rsp_fsm"),

      r_xram_rsp_fsm("r_xram_rsp_fsm"),
      m_xram_rsp_to_init_cmd_inst_fifo("m_xram_rsp_to_init_cmd_inst_fifo",8),
      m_xram_rsp_to_init_cmd_srcid_fifo("m_xram_rsp_to_init_cmd_srcid_fifo",8),
#if L1_MULTI_CACHE
      m_xram_rsp_to_init_cmd_cache_id_fifo("m_xram_rsp_to_init_cmd_cache_id_fifo",8),
#endif

      r_ixr_cmd_fsm("r_ixr_cmd_fsm"),
      r_tgt_rsp_fsm("r_tgt_rsp_fsm"),
      r_init_cmd_fsm("r_init_cmd_fsm"),

      // Resource-allocation FSMs
      r_alloc_dir_fsm("r_alloc_dir_fsm"),
      r_alloc_trt_fsm("r_alloc_trt_fsm"),
      r_alloc_upt_fsm("r_alloc_upt_fsm"),
      r_alloc_heap_fsm("r_alloc_heap_fsm")
  {
      // Structural parameters must be non-zero powers of 2
      assert(IS_POW_OF_2(nsets));
      assert(IS_POW_OF_2(nwords));
      assert(IS_POW_OF_2(nways));
      assert(nsets);
      assert(nwords);
      assert(nways);

      // check Transaction table size
      assert( (uint32_log2(transaction_tab_lines) <= vci_param::T) and
              "Need more bits for VCI TRDID field");

      // Set the broadcast address with Xmin,Xmax,Ymin,Ymax set to maximum
      m_broadcast_address = 0x3 | (0x7C1F << (vci_param::N-20));

      // Get the segments associated to the MemCache
      // NOTE(review): the template argument list of this iterator type
      // appears to have been lost in extraction (expected
      // std::list<soclib::common::Segment>::iterator) -- confirm against
      // the repository copy.
      std::list::iterator seg;
      size_t
      i;

      // Count the target and cleanup segments
      for(seg = m_seglist.begin(); seg != m_seglist.end() ; seg++) {
          m_nseg++;
      }
      for(seg = m_cseglist.begin(); seg != m_cseglist.end() ; seg++) {
          m_ncseg++;
      }

      // Build plain arrays of segment pointers for fast indexed access
      m_seg = new soclib::common::Segment*[m_nseg];
      i = 0;
      for ( seg = m_seglist.begin() ; seg != m_seglist.end() ; seg++ ) {
          m_seg[i] = &(*seg);
          i++;
      }
      m_cseg = new soclib::common::Segment*[m_ncseg];
      i = 0;
      for ( seg = m_cseglist.begin() ; seg != m_cseglist.end() ; seg++ ) {
          m_cseg[i] = &(*seg);
          i++;
      }

      // Memory cache allocation & initialisation
      // NOTE(review): the bodies of the allocation loops below, and the
      // sc_signal template arguments further down, appear to have been
      // truncated in extraction (an eaten "<...>" span) -- confirm
      // against the repository copy.
      m_cache_data = new data_t**[nways];
      for ( size_t i=0 ; i[m_transaction_tab_lines];

      // Allocation for XRAM_RSP FSM
      r_xram_rsp_victim_data = new sc_signal[nwords];
      r_xram_rsp_to_tgt_rsp_data = new sc_signal[nwords];
      r_xram_rsp_to_ixr_cmd_data = new sc_signal[nwords];

      // Allocation for READ FSM
      r_read_data = new sc_signal[nwords];
      r_read_to_tgt_rsp_data = new sc_signal[nwords];

      // Allocation for WRITE FSM
      r_write_data = new sc_signal[nwords];
      r_write_be = new sc_signal[nwords];
      r_write_to_init_cmd_data = new sc_signal[nwords];
      r_write_to_init_cmd_be = new sc_signal[nwords];
      r_write_to_ixr_cmd_data = new sc_signal[nwords];

      // Allocation for SC FSM
      r_sc_to_ixr_cmd_data = new sc_signal[nwords];
      r_sc_rdata = new sc_signal[2];

      // Simulation
      // transition() is clocked on the rising edge of p_clk,
      // genMoore() on the falling edge.
      SC_METHOD(transition);
      dont_initialize();
      sensitive << p_clk.pos();

      SC_METHOD(genMoore);
      dont_initialize();
      sensitive << p_clk.neg();
  } // end constructor

  ///////////////////////////////////////////////////////////////////////
  tmpl(void)::start_monitor( vci_addr_t addr, vci_addr_t length )
  ///////////////////////////////////////////////////////////////////////
  {
      // Enable write monitoring on the address window [addr, addr+length)
      m_monitor_ok = true;
      m_monitor_base = addr;
      m_monitor_length = length;
  }

  ///////////////////////////////////////////////////////////////////////
  tmpl(void)::stop_monitor()
  ///////////////////////////////////////////////////////////////////////
  {
      // Disable write monitoring
      m_monitor_ok = false;
  }

  ///////////////////////////////////////////////////////////////////////
  tmpl(void)::check_monitor( const char *buf,
                             vci_addr_t addr,
                             data_t data )
  ///////////////////////////////////////////////////////////////////////
  {
      // Report the write if it falls inside the monitored address window
      if ( (addr >= m_monitor_base) and
           (addr < m_monitor_base + m_monitor_length) )
      {
          std::cout << " MEMC Write Monitor : " << buf
                    << " Address = " << std::hex << addr
                    << " / Data = " << data << std::endl;
      }
  }

  /////////////////////////////////////////////////////
  tmpl(void)::copies_monitor( vci_addr_t addr )
  /////////////////////////////////////////////////////
  {
      // Trace any change of the valid bit or copy counter in the
      // directory entry associated to address addr
      DirectoryEntry entry = m_cache_directory.read_neutral(addr);
      if ( (entry.count != m_debug_previous_count) or
           (entry.valid != m_debug_previous_hit) )
      {
          std::cout << " MEMC " << name()
                    << " cache change at cycle " << std::dec << m_cpt_cycles
                    << " for address " << std::hex << addr
                    << " / HIT = " << entry.valid
                    << " / COUNT = " << std::dec << entry.count << std::endl;
      }
      m_debug_previous_count = entry.count;
      m_debug_previous_hit = entry.valid;
  }

  //////////////////////////////////////////////////
  tmpl(void)::print_trace()
  //////////////////////////////////////////////////
  {
      // One-line summary of the current state of all FSMs
      std::cout << "MEMC " << name() << std::endl;
      std::cout << " " << tgt_cmd_fsm_str[r_tgt_cmd_fsm]
                << " | " << tgt_rsp_fsm_str[r_tgt_rsp_fsm]
                << " | " << read_fsm_str[r_read_fsm]
                << " | " << write_fsm_str[r_write_fsm]
                << " | " << sc_fsm_str[r_sc_fsm]
                << " | " << cleanup_fsm_str[r_cleanup_fsm] << std::endl;
      std::cout << " " << init_cmd_fsm_str[r_init_cmd_fsm]
                << " | " << init_rsp_fsm_str[r_init_rsp_fsm]
                << " | " << ixr_cmd_fsm_str[r_ixr_cmd_fsm]
                << " | " << ixr_rsp_fsm_str[r_ixr_rsp_fsm]
                << " | " << xram_rsp_fsm_str[r_xram_rsp_fsm] << std::endl;
  }

  /////////////////////////////////////////
  tmpl(void)::print_stats()
  /////////////////////////////////////////
  {
      // Dump the activity counters, normalised per cycle where meaningful
      std::cout << "----------------------------------" << std::dec << std::endl;
      std::cout << "MEM_CACHE " << m_srcid_ini << " / Time = " << m_cpt_cycles << std::endl
                << "- READ RATE = " << (double)m_cpt_read/m_cpt_cycles << std::endl
                << "- READ TOTAL = " << m_cpt_read << std::endl
                << "- READ MISS RATE = " << (double)m_cpt_read_miss/m_cpt_read << std::endl
                << "- WRITE RATE = " << (double)m_cpt_write/m_cpt_cycles << std::endl
                << "- WRITE TOTAL = " << m_cpt_write << std::endl
                << "- WRITE MISS RATE = " << (double)m_cpt_write_miss/m_cpt_write << std::endl
                << "- WRITE BURST LENGTH = " << (double)m_cpt_write_cells/m_cpt_write << std::endl
                << "- WRITE BURST TOTAL = " << m_cpt_write_cells << std::endl
                << "- REQUESTS TRT FULL = " << m_cpt_trt_full << std::endl
                << "- READ TRT BLOKED HIT = " << m_cpt_trt_rb << std::endl
                << "- UPDATE RATE = " << (double)m_cpt_update/m_cpt_cycles << std::endl
                << "- UPDATE ARITY = " << (double)m_cpt_update_mult/m_cpt_update << std::endl
                << "- INVAL MULTICAST RATE = " << (double)(m_cpt_inval-m_cpt_inval_brdcast)/m_cpt_cycles << std::endl
                << "- INVAL MULTICAST ARITY= " << (double)m_cpt_inval_mult/(m_cpt_inval-m_cpt_inval_brdcast) << std::endl
                << "- INVAL BROADCAST RATE = " << (double)m_cpt_inval_brdcast/m_cpt_cycles << std::endl
                << "- SAVE DIRTY RATE = " << (double)m_cpt_write_dirty/m_cpt_cycles << std::endl
                << "- CLEANUP RATE = " << (double)m_cpt_cleanup/m_cpt_cycles << std::endl
                << "- LL RATE = " << (double)m_cpt_ll/m_cpt_cycles << std::endl
                << "- SC RATE = " << (double)m_cpt_sc/m_cpt_cycles << std::endl;
  }

  /////////////////////////////////
  tmpl(/**/)::~VciMemCacheV4()
  /////////////////////////////////
  {
      // NOTE(review): the destructor body and the beginning of the
      // transition() function appear to have been truncated together in
      // extraction (an eaten "<...>" span swallowed the delete loops,
      // the transition() signature and its reset logic) -- confirm
      // against the repository copy before relying on the lines below.
      for(size_t i=0; i m_debug_start_cycle) and m_debug_ok;
      m_debug_tgt_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;
      m_debug_tgt_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;
      m_debug_init_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;
      m_debug_init_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;
      m_debug_read_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;
      m_debug_write_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;
      m_debug_sc_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;
      m_debug_cleanup_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;
m_debug_ixr_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; m_debug_ixr_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; m_debug_xram_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; #if DEBUG_MEMC_GLOBAL if( m_debug_global ) { std::cout << "---------------------------------------------" << std::dec << std::endl; std::cout << "MEM_CACHE " << m_srcid_ini << " ; Time = " << m_cpt_cycles << std::endl << " - TGT_CMD FSM = " << tgt_cmd_fsm_str[r_tgt_cmd_fsm] << std::endl << " - TGT_RSP FSM = " << tgt_rsp_fsm_str[r_tgt_rsp_fsm] << std::endl << " - INIT_CMD FSM = " << init_cmd_fsm_str[r_init_cmd_fsm] << std::endl << " - INIT_RSP FSM = " << init_rsp_fsm_str[r_init_rsp_fsm] << std::endl << " - READ FSM = " << read_fsm_str[r_read_fsm] << std::endl << " - WRITE FSM = " << write_fsm_str[r_write_fsm] << std::endl << " - SC FSM = " << sc_fsm_str[r_sc_fsm] << std::endl << " - CLEANUP FSM = " << cleanup_fsm_str[r_cleanup_fsm] << std::endl << " - IXR_CMD FSM = " << ixr_cmd_fsm_str[r_ixr_cmd_fsm] << std::endl << " - IXR_RSP FSM = " << ixr_rsp_fsm_str[r_ixr_rsp_fsm] << std::endl << " - XRAM_RSP FSM = " << xram_rsp_fsm_str[r_xram_rsp_fsm] << std::endl << " - ALLOC_DIR FSM = " << alloc_dir_fsm_str[r_alloc_dir_fsm] << std::endl << " - ALLOC_TRT FSM = " << alloc_trt_fsm_str[r_alloc_trt_fsm] << std::endl << " - ALLOC_UPT FSM = " << alloc_upt_fsm_str[r_alloc_upt_fsm] << std::endl << " - ALLOC_HEAP FSM = " << alloc_heap_fsm_str[r_alloc_heap_fsm] << std::endl; } #endif //////////////////////////////////////////////////////////////////////////////////// // TGT_CMD FSM //////////////////////////////////////////////////////////////////////////////////// // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors // // There is 3 types of accepted commands : // - READ : a READ request has a length of 1 VCI cell. It can be a single word // or an entire cache line, depending on the PLEN value. 
// - WRITE : a WRITE request has a maximum length of 16 cells, and can only // concern words in a same line. // - SC : The SC request has a length of 2 cells or 4 cells. //////////////////////////////////////////////////////////////////////////////////// switch ( r_tgt_cmd_fsm.read() ) { ////////////////// case TGT_CMD_IDLE: { if ( p_vci_tgt.cmdval ) { #if DEBUG_MEMC_TGT_CMD if( m_debug_tgt_cmd_fsm ) { std::cout << " Receive command from srcid " << std::dec << p_vci_tgt.srcid.read() << " / for address " << std::hex << p_vci_tgt.address.read() << std::endl; } #endif // checking segmentation violation vci_addr_t address = p_vci_tgt.address.read(); uint32_t plen = p_vci_tgt.plen.read(); bool found = false; for ( size_t seg_id = 0 ; seg_id < m_nseg ; seg_id++ ) { if ( m_seg[seg_id]->contains(address) && m_seg[seg_id]->contains(address + plen - vci_param::B) ) { found = true; } } if ( not found ) { std::cout << "VCI_MEM_CACHE ERROR " << name() << std::endl; std::cout << "Out of segment VCI address in TGT_CMD_IDLE state" << std::endl; exit(0); } if ( p_vci_tgt.cmd.read() == vci_param::CMD_READ ) { r_tgt_cmd_fsm = TGT_CMD_READ; } else if ( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) { r_tgt_cmd_fsm = TGT_CMD_WRITE; } else if ( p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND ) { r_tgt_cmd_fsm = TGT_CMD_ATOMIC; } else { std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_IDLE state" << std::endl; std::cout << " illegal VCI command type" << std::endl; exit(0); } } break; } ////////////////// case TGT_CMD_READ: { if ((m_x[(vci_addr_t)p_vci_tgt.address.read()]+(p_vci_tgt.plen.read()>>2)) > 16) { std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" << std::endl; std::cout << " illegal address/plen combination for VCI read command" << std::endl; exit(0); } if ( !p_vci_tgt.eop.read() ) { std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" << std::endl; std::cout << " read command packets must contain one single flit" << std::endl; 
exit(0); } if ( p_vci_tgt.cmdval && m_cmd_read_addr_fifo.wok() ) { #if DEBUG_MEMC_TGT_CMD if( m_debug_tgt_cmd_fsm ) { std::cout << " Push into read_fifo:" << " address = " << std::hex << p_vci_tgt.address.read() << " srcid = " << std::dec << p_vci_tgt.srcid.read() << " trdid = " << p_vci_tgt.trdid.read() << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; } #endif cmd_read_fifo_put = true; m_cpt_read++; r_tgt_cmd_fsm = TGT_CMD_IDLE; } break; } /////////////////// case TGT_CMD_WRITE: { if ( p_vci_tgt.cmdval && m_cmd_write_addr_fifo.wok() ) { #if DEBUG_MEMC_TGT_CMD if( m_debug_tgt_cmd_fsm ) { std::cout << " Push into write_fifo:" << " address = " << std::hex << p_vci_tgt.address.read() << " srcid = " << std::dec << p_vci_tgt.srcid.read() << " trdid = " << p_vci_tgt.trdid.read() << " wdata = " << std::hex << p_vci_tgt.wdata.read() << " be = " << p_vci_tgt.be.read() << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; } #endif cmd_write_fifo_put = true; if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; } break; } //////////////////// case TGT_CMD_ATOMIC: { if ( (p_vci_tgt.plen.read() != 8) && (p_vci_tgt.plen.read() != 16) ) { std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_ATOMIC state" << std::endl; std::cout << "illegal format for sc command " << std::endl; exit(0); } if ( p_vci_tgt.cmdval && m_cmd_sc_addr_fifo.wok() ) { #if DEBUG_MEMC_TGT_CMD if( m_debug_tgt_cmd_fsm ) { std::cout << " Pushing command into cmd_sc_fifo:" << " address = " << std::hex << p_vci_tgt.address.read() << " srcid = " << std::dec << p_vci_tgt.srcid.read() << " trdid = " << p_vci_tgt.trdid.read() << " wdata = " << std::hex << p_vci_tgt.wdata.read() << " be = " << p_vci_tgt.be.read() << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; } #endif cmd_sc_fifo_put = true; if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; } break; } } // end switch tgt_cmd_fsm ///////////////////////////////////////////////////////////////////////// // INIT_RSP FSM 
///////////////////////////////////////////////////////////////////////// // This FSM controls the response to the update or inval coherence // requests sent by the memory cache to the L1 caches and update the UPT. // // It can be update or inval requests initiated by the WRITE FSM, // or inval requests initiated by the XRAM_RSP FSM. // It can also be a direct request from the WRITE FSM. // // The FSM decrements the proper entry in UPT. // It sends a request to the TGT_RSP FSM to complete the pending // write transaction (acknowledge response to the writer processor), // and clear the UPT entry when all responses have been received. // // All those response packets are one word, compact // packets complying with the VCI advanced format. // The index in the Table is defined in the RTRDID field, and // the transaction type is defined in the UPT entry. ///////////////////////////////////////////////////////////////////// switch ( r_init_rsp_fsm.read() ) { /////////////////// case INIT_RSP_IDLE: // wait a response for a coherence transaction { if ( p_vci_ini.rspval ) { #if DEBUG_MEMC_INIT_RSP if( m_debug_init_rsp_fsm ) { std::cout << " Response for UPT entry " << p_vci_ini.rtrdid.read() << std::endl; } #endif if ( p_vci_ini.rtrdid.read() >= m_update_tab.size() ) { std::cout << "VCI_MEM_CACHE ERROR " << name() << " INIT_RSP_IDLE state" << std::endl << "index too large for UPT: " << " / rtrdid = " << std::dec << p_vci_ini.rtrdid.read() << " / UPT size = " << std::dec << m_update_tab.size() << std::endl; exit(0); } if ( !p_vci_ini.reop.read() ) { std::cout << "VCI_MEM_CACHE ERROR " << name() << " INIT_RSP_IDLE state" << std::endl; std::cout << "all coherence response packets must be one flit" << std::endl; exit(0); } r_init_rsp_upt_index = p_vci_ini.rtrdid.read(); r_init_rsp_fsm = INIT_RSP_UPT_LOCK; } else if( r_write_to_init_rsp_req.read() ) { r_init_rsp_upt_index = r_write_to_init_rsp_upt_index.read(); r_write_to_init_rsp_req = false; r_init_rsp_fsm = INIT_RSP_UPT_LOCK; 
} break; } /////////////////////// case INIT_RSP_UPT_LOCK: // decrement the number of expected responses { if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) { size_t count = 0; bool valid = m_update_tab.decrement(r_init_rsp_upt_index.read(), count); #if DEBUG_MEMC_INIT_RSP if( m_debug_init_rsp_fsm ) { std::cout << " Decrement the responses counter for UPT:" << " entry = " << r_init_rsp_upt_index.read() << " / rsp_count = " << std::dec << count << std::endl; } #endif if ( not valid ) { std::cout << "VCI_MEM_CACHE ERROR " << name() << " INIT_RSP_UPT_LOCK state" << std::endl << "unsuccessful access to decrement the UPT" << std::endl; exit(0); } if ( count == 0 ) r_init_rsp_fsm = INIT_RSP_UPT_CLEAR; else r_init_rsp_fsm = INIT_RSP_IDLE; } break; } //////////////////////// case INIT_RSP_UPT_CLEAR: // clear the UPT entry { if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) { r_init_rsp_srcid = m_update_tab.srcid(r_init_rsp_upt_index.read()); r_init_rsp_trdid = m_update_tab.trdid(r_init_rsp_upt_index.read()); r_init_rsp_pktid = m_update_tab.pktid(r_init_rsp_upt_index.read()); r_init_rsp_nline = m_update_tab.nline(r_init_rsp_upt_index.read()); bool need_rsp = m_update_tab.need_rsp(r_init_rsp_upt_index.read()); if ( need_rsp ) r_init_rsp_fsm = INIT_RSP_END; else r_init_rsp_fsm = INIT_RSP_IDLE; m_update_tab.clear(r_init_rsp_upt_index.read()); #if DEBUG_MEMC_INIT_RSP if ( m_debug_init_rsp_fsm ) { std::cout << " Clear UPT entry " << r_init_rsp_upt_index.read() << std::endl; } #endif } break; } ////////////////// case INIT_RSP_END: // Post a request to TGT_RSP FSM { if ( !r_init_rsp_to_tgt_rsp_req ) { r_init_rsp_to_tgt_rsp_req = true; r_init_rsp_to_tgt_rsp_srcid = r_init_rsp_srcid.read(); r_init_rsp_to_tgt_rsp_trdid = r_init_rsp_trdid.read(); r_init_rsp_to_tgt_rsp_pktid = r_init_rsp_pktid.read(); r_init_rsp_fsm = INIT_RSP_IDLE; #if DEBUG_MEMC_INIT_RSP if ( m_debug_init_rsp_fsm ) { std::cout << " Request TGT_RSP FSM to send a response to srcid " << r_init_rsp_srcid.read() << 
std::endl; } #endif } break; } } // end switch r_init_rsp_fsm //////////////////////////////////////////////////////////////////////////////////// // READ FSM //////////////////////////////////////////////////////////////////////////////////// // The READ FSM controls the VCI read requests. // It takes the lock protecting the cache directory to check the cache line status: // - In case of HIT // The fsm copies the data (one line, or one single word) // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. // The requesting initiator is registered in the cache directory. // If the number of copy is larger than 1, the new copy is registered // in the HEAP. // If the number of copy is larger than the threshold, the HEAP is cleared, // and the corresponding line switches to the counter mode. // - In case of MISS // The READ fsm takes the lock protecting the transaction tab. // If a read transaction to the XRAM for this line already exists, // or if the transaction tab is full, the fsm is stalled. // If a TRT entry is free, the READ request is registered in TRT, // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. // The READ FSM returns in the IDLE state as the read transaction will be // completed when the missing line will be received. 
//////////////////////////////////////////////////////////////////////////////////// switch ( r_read_fsm.read() ) { /////////////// case READ_IDLE: // waiting a read request { if (m_cmd_read_addr_fifo.rok()) { #if DEBUG_MEMC_READ if( m_debug_read_fsm ) { std::cout << " Read request:" << " srcid = " << std::dec << m_cmd_read_srcid_fifo.read() << " / address = " << std::hex << m_cmd_read_addr_fifo.read() << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; } #endif r_read_fsm = READ_DIR_LOCK; } break; } /////////////////// case READ_DIR_LOCK: // check directory for hit / miss { if ( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { size_t way = 0; DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); r_read_is_cnt = entry.is_cnt; r_read_dirty = entry.dirty; r_read_lock = entry.lock; r_read_tag = entry.tag; r_read_way = way; r_read_count = entry.count; r_read_copy = entry.owner.srcid; #if L1_MULTI_CACHE r_read_copy_cache = entry.owner.cache_id; #endif r_read_copy_inst = entry.owner.inst; r_read_ptr = entry.ptr; // pointer to the heap bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); if( entry.valid ) // hit { // test if we need to register a new copy in the heap if ( entry.is_cnt || (entry.count == 0) || !cached_read ) r_read_fsm = READ_DIR_HIT; else r_read_fsm = READ_HEAP_LOCK; } else // miss { r_read_fsm = READ_TRT_LOCK; } #if DEBUG_MEMC_READ if( m_debug_read_fsm ) { std::cout << " Accessing directory: " << " address = " << std::hex << m_cmd_read_addr_fifo.read() << " / hit = " << std::dec << entry.valid << " / count = " < Update directory entry:" << " set = " << std::dec << set << " / way = " << way << " / owner_id = " << entry.owner.srcid << " / owner_ins = " << entry.owner.inst << " / count = " << entry.count << " / is_cnt = " << entry.is_cnt << std::endl; } #endif m_cache_directory.write(set, way, entry); r_read_fsm = READ_RSP; } break; } //////////////////// case READ_HEAP_LOCK: // read data in cache, update 
the directory // and prepare the HEAP update { if( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) { // enter counter mode when we reach the limit of copies or the heap is full bool go_cnt = (r_read_count.read() >= r_copies_limit.read()) || m_heap.is_full(); // read data in the cache size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; size_t way = r_read_way.read(); for ( size_t i=0 ; i1 ) // heap must be cleared { HeapEntry next_entry = m_heap.read(r_read_ptr.read()); r_read_next_ptr = m_heap.next_free_ptr(); m_heap.write_free_ptr(r_read_ptr.read()); if( next_entry.next == r_read_ptr.read() ) // last entry { r_read_fsm = READ_HEAP_LAST; // erase the entry } else // not the last entry { r_read_ptr = next_entry.next; r_read_fsm = READ_HEAP_ERASE; // erase the list } } else // the heap is not used / nothing to do { r_read_fsm = READ_RSP; } } #if DEBUG_MEMC_READ if( m_debug_read_fsm ) { std::cout << " Update directory:" << " tag = " << std::hex << entry.tag << " set = " << std::dec << set << " way = " << way << " count = " << entry.count << " is_cnt = " << entry.is_cnt << std::endl; } #endif } break; } ///////////////////// case READ_HEAP_WRITE: // add a entry in the heap { if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) { HeapEntry heap_entry; heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); #if L1_MULTI_CACHE heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); #endif heap_entry.owner.inst = (m_cmd_read_trdid_fifo.read() & 0x2); if(r_read_count.read() == 1) // creation of a new linked list { heap_entry.next = m_heap.next_free_ptr(); } else // head insertion in existing list { heap_entry.next = r_read_ptr.read(); } m_heap.write_free_entry(heap_entry); m_heap.write_free_ptr(r_read_next_ptr.read()); if(r_read_last_free.read()) m_heap.set_full(); r_read_fsm = READ_RSP; #if DEBUG_MEMC_READ if( m_debug_read_fsm ) { std::cout << " Add an entry in the heap:" << " owner_id = " << heap_entry.owner.srcid << " owner_ins = " << heap_entry.owner.inst << 
std::endl; } #endif } else { std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_WRITE state" << std::endl; std::cout << "Bad HEAP allocation" << std::endl; exit(0); } break; } ///////////////////// case READ_HEAP_ERASE: { if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) { HeapEntry next_entry = m_heap.read(r_read_ptr.read()); if( next_entry.next == r_read_ptr.read() ) { r_read_fsm = READ_HEAP_LAST; } else { r_read_ptr = next_entry.next; r_read_fsm = READ_HEAP_ERASE; } } else { std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_ERASE state" << std::endl; std::cout << "Bad HEAP allocation" << std::endl; exit(0); } break; } //////////////////// case READ_HEAP_LAST: { if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) { HeapEntry last_entry; last_entry.owner.srcid = 0; #if L1_MULTI_CACHE last_entry.owner.cache_id = 0; #endif last_entry.owner.inst = false; if(m_heap.is_full()) { last_entry.next = r_read_ptr.read(); m_heap.unset_full(); } else { last_entry.next = r_read_next_ptr.read(); } m_heap.write(r_read_ptr.read(),last_entry); r_read_fsm = READ_RSP; } else { std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LAST state" << std::endl; std::cout << "Bad HEAP allocation" << std::endl; exit(0); } break; } ////////////// case READ_RSP: // request the TGT_RSP FSM to return data { if( !r_read_to_tgt_rsp_req ) { for ( size_t i=0 ; i Request the TGT_RSP FSM to return data:" << " rsrcid = " << std::dec << m_cmd_read_srcid_fifo.read() << " / address = " << std::hex << m_cmd_read_addr_fifo.read() << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; } #endif } break; } /////////////////// case READ_TRT_LOCK: // read miss : check the Transaction Table { if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) { size_t index = 0; vci_addr_t addr = (vci_addr_t)m_cmd_read_addr_fifo.read(); bool hit_read = m_transaction_tab.hit_read(m_nline[addr], index); bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); bool wok = 
!m_transaction_tab.full(index); if( hit_read || !wok || hit_write ) // missing line already requested or no space { if(!wok) m_cpt_trt_full++; if(hit_read || hit_write) m_cpt_trt_rb++; r_read_fsm = READ_IDLE; } else // missing line is requested to the XRAM { m_cpt_read_miss++; r_read_trt_index = index; r_read_fsm = READ_TRT_SET; } #if DEBUG_MEMC_READ if( m_debug_read_fsm ) { std::cout << " Check TRT:" << " hit_read = " << hit_read << " / hit_write = " << hit_write << " / full = " << !wok << std::endl; } #endif } break; } ////////////////// case READ_TRT_SET: // register get transaction in TRT { if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) { m_transaction_tab.set(r_read_trt_index.read(), true, m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], m_cmd_read_srcid_fifo.read(), m_cmd_read_trdid_fifo.read(), m_cmd_read_pktid_fifo.read(), true, m_cmd_read_length_fifo.read(), m_x[(vci_addr_t)(m_cmd_read_addr_fifo.read())], std::vector(m_words,0), std::vector(m_words,0)); #if DEBUG_MEMC_READ if( m_debug_read_fsm ) { std::cout << " Write in Transaction Table: " << std::hex << " address = " << std::hex << m_cmd_read_addr_fifo.read() << " / srcid = " << std::dec << m_cmd_read_srcid_fifo.read() << std::endl; } #endif r_read_fsm = READ_TRT_REQ; } break; } ////////////////// case READ_TRT_REQ: // consume the read request in the FIFO, // and send it to the ixr_cmd_fsm { if( not r_read_to_ixr_cmd_req ) { cmd_read_fifo_get = true; r_read_to_ixr_cmd_req = true; r_read_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); r_read_fsm = READ_IDLE; #if DEBUG_MEMC_READ if( m_debug_read_fsm ) { std::cout << " Request GET transaction for address " << std::hex << m_cmd_read_addr_fifo.read() << std::endl; } #endif } break; } } // end switch read_fsm /////////////////////////////////////////////////////////////////////////////////// // WRITE FSM 
///////////////////////////////////////////////////////////////////////////////////
// The WRITE FSM handles the write bursts sent by the processors.
// All addresses in a burst must be in the same cache line.
// A complete write burst is consumed in the FIFO & copied to a local buffer.
// Then the FSM takes the lock protecting the cache directory, to check
// if the line is in the cache.
//
// - In case of HIT, the cache is updated.
//   If there is no other copy, an acknowledge response is immediately
//   returned to the writing processor.
//   If the data is cached by other processors, a coherence transaction must
//   be launched:
//   It is a multicast update if the line is not in counter mode, and the FSM
//   takes the lock protecting the Update Table (UPT) to register this transaction.
//   It is a broadcast invalidate if the line is in counter mode.
//   If the UPT is full, it releases the lock(s) and retries. Then, it sends
//   a multi-update request to all owners of the line (but the writer),
//   through the INIT_CMD FSM. In case of coherence transaction, the WRITE FSM
//   does not respond to the writing processor, as this response will be sent by
//   the INIT_RSP FSM when all update responses have been received.
//
// - In case of MISS, the WRITE FSM takes the lock protecting the transaction
//   table (TRT). If a read transaction to the XRAM for this line already exists,
//   it writes in the TRT (write buffer). Otherwise, if a TRT entry is free,
//   the WRITE FSM registers a new transaction in TRT, and sends a read line request
//   to the XRAM. If the TRT is full, it releases the lock, and waits.
//   Finally, the WRITE FSM returns an acknowledge response to the writing processor.
///////////////////////////////////////////////////////////////////////////////////// switch ( r_write_fsm.read() ) { //////////////// case WRITE_IDLE: // copy first word of a write burst in local buffer { if ( m_cmd_write_addr_fifo.rok() ) { m_cpt_write++; m_cpt_write_cells++; // consume a word in the FIFO & write it in the local buffer cmd_write_fifo_get = true; size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); r_write_word_index = index; r_write_word_count = 1; r_write_data[index] = m_cmd_write_data_fifo.read(); r_write_srcid = m_cmd_write_srcid_fifo.read(); r_write_trdid = m_cmd_write_trdid_fifo.read(); r_write_pktid = m_cmd_write_pktid_fifo.read(); // initialize the be field for all words for ( size_t i=0 ; i Write request " << " srcid = " << std::dec << m_cmd_write_srcid_fifo.read() << " / address = " << std::hex << m_cmd_write_addr_fifo.read() << " / data = " << m_cmd_write_data_fifo.read() << std::endl; } #endif } break; } //////////////// case WRITE_NEXT: // copy next word of a write burst in local buffer { if ( m_cmd_write_addr_fifo.rok() ) { #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { std::cout << " Write another word in local buffer" << std::endl; } #endif m_cpt_write_cells++; // check that the next word is in the same cache line if ( (m_nline[(vci_addr_t)(r_write_address.read())] != m_nline[(vci_addr_t)(m_cmd_write_addr_fifo.read())]) ) { std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_NEXT state" << std::endl; std::cout << "all words in a write burst must be in same cache line" << std::endl; exit(0); } // consume a word in the FIFO & write it in the local buffer cmd_write_fifo_get=true; size_t index = r_write_word_index.read() + r_write_word_count.read(); r_write_be[index] = m_cmd_write_be_fifo.read(); r_write_data[index] = m_cmd_write_data_fifo.read(); r_write_word_count = r_write_word_count.read() + 1; if( !((m_cmd_write_be_fifo.read() == 
0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) r_write_byte = true; if ( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; } break; } //////////////////// case WRITE_DIR_LOCK: // access directory to check hit/miss { if ( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) { size_t way = 0; DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); if ( entry.valid ) // hit { // copy directory entry in local buffer in case of hit r_write_is_cnt = entry.is_cnt; r_write_lock = entry.lock; r_write_tag = entry.tag; r_write_copy = entry.owner.srcid; #if L1_MULTI_CACHE r_write_copy_cache = entry.owner.cache_id; #endif r_write_copy_inst = entry.owner.inst; r_write_count = entry.count; r_write_ptr = entry.ptr; r_write_way = way; if( entry.is_cnt && entry.count ) { r_write_fsm = WRITE_DIR_READ; } else { if (r_write_byte.read()) r_write_fsm = WRITE_DIR_READ; else r_write_fsm = WRITE_DIR_HIT; } } else // miss { r_write_fsm = WRITE_MISS_TRT_LOCK; } #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { std::cout << " Check the directory: " << " address = " << std::hex << r_write_address.read() << " hit = " << std::dec << entry.valid << " count = " << entry.count << " is_cnt = " << entry.is_cnt << std::endl; } #endif } break; } //////////////////// case WRITE_DIR_READ: // read the cache and complete the buffer when be!=0xF { // update local buffer size_t set = m_y[(vci_addr_t)(r_write_address.read())]; size_t way = r_write_way.read(); for(size_t i=0 ; i Read the cache to complete local buffer" << std::endl; } #endif break; } /////////////////// case WRITE_DIR_HIT: // update the cache directory { // update directory with Dirty bit DirectoryEntry entry; entry.valid = true; entry.dirty = true; entry.tag = r_write_tag.read(); entry.is_cnt = r_write_is_cnt.read(); entry.lock = r_write_lock.read(); entry.owner.srcid = r_write_copy.read(); #if L1_MULTI_CACHE entry.owner.cache_id = r_write_copy_cache.read(); #endif entry.owner.inst = r_write_copy_inst.read(); entry.count = 
r_write_count.read(); entry.ptr = r_write_ptr.read(); size_t set = m_y[(vci_addr_t)(r_write_address.read())]; size_t way = r_write_way.read(); // update directory m_cache_directory.write(set, way, entry); // owner is true when the the first registered copy is the writer itself bool owner = (((r_write_copy.read() == r_write_srcid.read()) #if L1_MULTI_CACHE and (r_write_copy_cache.read()==r_write_pktid.read()) #endif ) and not r_write_copy_inst.read()); // no_update is true when there is no need for coherence transaction bool no_update = (r_write_count.read()==0) || ( owner && (r_write_count.read()==1)); // write data in the cache if no coherence transaction if( no_update ) { for(size_t i=0 ; i Write into cache / No coherence transaction" << std::endl; } else { std::cout << " Coherence update required:" << " is_cnt = " << r_write_is_cnt.read() << " nb_copies = " << std::dec << r_write_count.read() << std::endl; if (owner) std::cout << " ... but the first copy is the writer" << std::endl; } } #endif break; } //////////////////// case WRITE_UPT_LOCK: // Try to register the update request in UPT { if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) { bool wok = false; size_t index = 0; size_t srcid = r_write_srcid.read(); size_t trdid = r_write_trdid.read(); size_t pktid = r_write_pktid.read(); addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; size_t nb_copies = r_write_count.read(); size_t set = m_y[(vci_addr_t)(r_write_address.read())]; size_t way = r_write_way.read(); wok = m_update_tab.set(true, // it's an update transaction false, // it's not a broadcast true, // it needs a response srcid, trdid, pktid, nline, nb_copies, index); if ( wok ) // write data in cache { for(size_t i=0 ; i Register the multicast update in UPT / " << " nb_copies = " << r_write_count.read() << std::endl; } } #endif r_write_upt_index = index; // releases the lock protecting UPT and the DIR if no entry... 
if ( wok ) r_write_fsm = WRITE_UPT_HEAP_LOCK; else r_write_fsm = WRITE_WAIT; } break; } ///////////////////////// case WRITE_UPT_HEAP_LOCK: // get access to heap { if( r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE ) { #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { std::cout << " Get acces to the HEAP" << std::endl; } #endif r_write_fsm = WRITE_UPT_REQ; } break; } ////////////////// case WRITE_UPT_REQ: // prepare the coherence ransaction for the INIT_CMD FSM // and write the first copy in the FIFO // send the request if only one copy { if( !r_write_to_init_cmd_multi_req.read() && !r_write_to_init_cmd_brdcast_req.read() ) // no pending coherence request { r_write_to_init_cmd_brdcast_req = false; r_write_to_init_cmd_trdid = r_write_upt_index.read(); r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; r_write_to_init_cmd_index = r_write_word_index.read(); r_write_to_init_cmd_count = r_write_word_count.read(); for(size_t i=0; i Post first request to INIT_CMD FSM" << " / srcid = " << std::dec << r_write_copy.read() << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; if ( r_write_count.read() == 1) std::cout << " ... and this is the last" << std::endl; } #endif } break; } /////////////////// case WRITE_UPT_NEXT: // continue the multi-update request to INIT_CMD fsm // when there is copies in the heap. // if one copy in the heap is the writer itself // the corresponding SRCID should not be written in the fifo, // but the UPT counter must be decremented. // As this decrement is done in the WRITE_UPT_DEC state, // after the last copy has been found, the decrement request // must be registered in the r_write_to_dec flip-flop. 
{ HeapEntry entry = m_heap.read(r_write_ptr.read()); bool dec_upt_counter; if( (entry.owner.srcid != r_write_srcid.read()) or #if L1_MULTI_CACHE (entry.owner.cache_id != r_write_pktid.read()) or #endif entry.owner.inst) // put te next srcid in the fifo { dec_upt_counter = false; write_to_init_cmd_fifo_put = true; write_to_init_cmd_fifo_inst = entry.owner.inst; write_to_init_cmd_fifo_srcid = entry.owner.srcid; #if L1_MULTI_CACHE write_to_init_cmd_fifo_cache_id = entry.owner.cache_id; #endif #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { std::cout << " Post another request to INIT_CMD FSM" << " / heap_index = " << std::dec << r_write_ptr.read() << " / srcid = " << std::dec << r_write_copy.read() << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; if( entry.next == r_write_ptr.read() ) std::cout << " ... and this is the last" << std::endl; } #endif } else // the UPT counter must be decremented { dec_upt_counter = true; #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { std::cout << " Skip one entry in heap matching the writer" << " / heap_index = " << std::dec << r_write_ptr.read() << " / srcid = " << std::dec << r_write_copy.read() << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; if( entry.next == r_write_ptr.read() ) std::cout << " ... 
and this is the last" << std::endl; } #endif } // register the possible UPT decrement request r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); if( not m_write_to_init_cmd_inst_fifo.wok() ) { std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl << "The write_to_init_cmd_fifo should not be full" << std::endl << "as the depth should be larger than the max number of copies" << std::endl; exit(0); } r_write_ptr = entry.next; if( entry.next == r_write_ptr.read() ) // last copy { r_write_to_init_cmd_multi_req = true; if( r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; else r_write_fsm = WRITE_IDLE; } break; } ////////////////// case WRITE_UPT_DEC: // If the initial writer has a copy, it should not // receive an update request, but the counter in the // update table must be decremented by the INIT_RSP FSM. { if ( !r_write_to_init_rsp_req.read() ) { r_write_to_init_rsp_req = true; r_write_to_init_rsp_upt_index = r_write_upt_index.read(); r_write_fsm = WRITE_IDLE; } break; } /////////////// case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write // In order to increase the Write requests throughput, // we don't wait to return in the IDLE state to consume // a new request in the write FIFO { if ( !r_write_to_tgt_rsp_req.read() ) { // post the request to TGT_RSP_FSM r_write_to_tgt_rsp_req = true; r_write_to_tgt_rsp_srcid = r_write_srcid.read(); r_write_to_tgt_rsp_trdid = r_write_trdid.read(); r_write_to_tgt_rsp_pktid = r_write_pktid.read(); // try to get a new write request from the FIFO if ( m_cmd_write_addr_fifo.rok() ) { m_cpt_write++; m_cpt_write_cells++; // consume a word in the FIFO & write it in the local buffer cmd_write_fifo_get = true; size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); r_write_word_index = index; r_write_word_count = 1; r_write_data[index] = m_cmd_write_data_fifo.read(); r_write_srcid = 
m_cmd_write_srcid_fifo.read(); r_write_trdid = m_cmd_write_trdid_fifo.read(); r_write_pktid = m_cmd_write_pktid_fifo.read(); // initialize the be field for all words for ( size_t i=0 ; i Post a request to TGT_RSP FSM: rsrcid = " << std::dec << r_write_srcid.read() << std::endl; if ( m_cmd_write_addr_fifo.rok() ) { std::cout << " New Write request: " << " srcid = " << std::dec << m_cmd_write_srcid_fifo.read() << " / address = " << std::hex << m_cmd_write_addr_fifo.read() << " / data = " << m_cmd_write_data_fifo.read() << std::endl; } } #endif } break; } ///////////////////////// case WRITE_MISS_TRT_LOCK: // Miss : check Transaction Table { if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { std::cout << " Check the TRT" << std::endl; } #endif size_t hit_index = 0; size_t wok_index = 0; vci_addr_t addr = (vci_addr_t)r_write_address.read(); bool hit_read = m_transaction_tab.hit_read(m_nline[addr], hit_index); bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); bool wok = !m_transaction_tab.full(wok_index); if ( hit_read ) // register the modified data in TRT { r_write_trt_index = hit_index; r_write_fsm = WRITE_MISS_TRT_DATA; m_cpt_write_miss++; } else if ( wok && !hit_write ) // set a new entry in TRT { r_write_trt_index = wok_index; r_write_fsm = WRITE_MISS_TRT_SET; m_cpt_write_miss++; } else // wait an empty entry in TRT { r_write_fsm = WRITE_WAIT; m_cpt_trt_full++; } } break; } //////////////// case WRITE_WAIT: // release the locks protecting the shared ressources { #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { std::cout << " Releases the locks before retry" << std::endl; } #endif r_write_fsm = WRITE_DIR_LOCK; break; } //////////////////////// case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) { if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { std::vector be_vector; std::vector data_vector; be_vector.clear(); data_vector.clear(); for ( size_t i=0; i Set a new entry in TRT" << 
std::endl; } #endif } break; } ///////////////////////// case WRITE_MISS_TRT_DATA: // update an entry in TRT (used as a Write Buffer) { if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { std::vector be_vector; std::vector data_vector; be_vector.clear(); data_vector.clear(); for ( size_t i=0; i Modify an existing entry in TRT" << std::endl; m_transaction_tab.print( r_write_trt_index.read() ); } #endif } break; } ///////////////////////// case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM { if ( !r_write_to_ixr_cmd_req ) { r_write_to_ixr_cmd_req = true; r_write_to_ixr_cmd_write = false; r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); r_write_fsm = WRITE_RSP; #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { std::cout << " Post a GET request to the IXR_CMD FSM" << std::endl; } #endif } break; } /////////////////////// case WRITE_BC_TRT_LOCK: // Check TRT not full { if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { size_t wok_index = 0; bool wok = !m_transaction_tab.full( wok_index ); if ( wok ) // set a new entry in TRT { r_write_trt_index = wok_index; r_write_fsm = WRITE_BC_UPT_LOCK; } else // wait an empty entry in TRT { r_write_fsm = WRITE_WAIT; } #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { std::cout << " Check TRT : wok = " << wok << " / index = " << wok_index << std::endl; } #endif } break; } ////////////////////// case WRITE_BC_UPT_LOCK: // register BC transaction in UPT { if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) { bool wok = false; size_t index = 0; size_t srcid = r_write_srcid.read(); size_t trdid = r_write_trdid.read(); size_t pktid = r_write_pktid.read(); addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; size_t nb_copies = r_write_count.read(); wok =m_update_tab.set(false, // it's an inval transaction true, // it's a broadcast true, // it needs a response srcid, trdid, pktid, nline, nb_copies, index); #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { 
if ( wok ) { std::cout << " Register the broadcast inval in UPT / " << " nb_copies = " << r_write_count.read() << std::endl; } } #endif r_write_upt_index = index; if ( wok ) r_write_fsm = WRITE_BC_DIR_INVAL; else r_write_fsm = WRITE_WAIT; } break; } //////////////////////// case WRITE_BC_DIR_INVAL: // Register a put transaction to XRAM in TRT // and invalidate the line in directory { if ( (r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE ) || (r_alloc_upt_fsm.read() != ALLOC_UPT_WRITE ) || (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE ) ) { std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_BC_DIR_INVAL state" << std::endl; std::cout << "bad TRT, DIR, or UPT allocation" << std::endl; exit(0); } // register a write request to XRAM in TRT m_transaction_tab.set(r_write_trt_index.read(), false, // write request to XRAM m_nline[(vci_addr_t)(r_write_address.read())], 0, 0, 0, false, // not a processor read 0, // not a single word 0, // word index std::vector(m_words,0), std::vector(m_words,0)); // invalidate directory entry DirectoryEntry entry; entry.valid = false; entry.dirty = false; entry.tag = 0; entry.is_cnt = false; entry.lock = false; entry.owner.srcid = 0; #if L1_MULTI_CACHE entry.owner.cache_id= 0; #endif entry.owner.inst = false; entry.ptr = 0; entry.count = 0; size_t set = m_y[(vci_addr_t)(r_write_address.read())]; size_t way = r_write_way.read(); m_cache_directory.write(set, way, entry); #if DEBUG_MEMC_WRITE if( m_debug_write_fsm ) { std::cout << " Invalidate the directory entry: @ = " << r_write_address.read() << " / register the put transaction in TRT:" << std::endl; } #endif r_write_fsm = WRITE_BC_CC_SEND; break; } ////////////////////// case WRITE_BC_CC_SEND: // Post a coherence broadcast request to INIT_CMD FSM { if ( !r_write_to_init_cmd_multi_req.read() && !r_write_to_init_cmd_brdcast_req.read() ) { r_write_to_init_cmd_multi_req = false; r_write_to_init_cmd_brdcast_req = true; r_write_to_init_cmd_trdid = r_write_upt_index.read(); r_write_to_init_cmd_nline 
= m_nline[(vci_addr_t)(r_write_address.read())]; r_write_to_init_cmd_index = 0; r_write_to_init_cmd_count = 0; for(size_t i=0; i Post a broadcast request to INIT_CMD FSM" << std::endl; } #endif } break; } /////////////////////// case WRITE_BC_XRAM_REQ: // Post a put request to IXR_CMD FSM { if ( !r_write_to_ixr_cmd_req ) { r_write_to_ixr_cmd_req = true; r_write_to_ixr_cmd_write = true; r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); for(size_t i=0; i Post a put request to IXR_CMD FSM" << std::endl; } #endif } break; } } // end switch r_write_fsm /////////////////////////////////////////////////////////////////////// // IXR_CMD FSM /////////////////////////////////////////////////////////////////////// // The IXR_CMD fsm controls the command packets to the XRAM : // - It sends a single cell VCI read request to the XRAM in case of MISS // posted by the READ, WRITE or SC FSMs : the TRDID field contains // the Transaction Tab index. // The VCI response is a multi-cell packet : the N cells contain // the N data words. // - It sends a multi-cell VCI write when the XRAM_RSP FSM, WRITE FSM // or SC FSM request to save a dirty line to the XRAM. // The VCI response is a single cell packet. // This FSM handles requests from the READ, WRITE, SC & XRAM_RSP FSMs // with a round-robin priority. 
////////////////////////////////////////////////////////////////////////
// The four IDLE states below implement a round-robin priority between
// the four possible clients of the XRAM command port (READ, WRITE, SC
// and XRAM_RSP FSMs): each IDLE state polls the request flip-flops
// starting with the client that follows the one served last.
switch ( r_ixr_cmd_fsm.read() )
{
    ////////////////////////
    case IXR_CMD_READ_IDLE:   // round-robin: READ was served last
        if ( r_write_to_ixr_cmd_req )         r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE;
        else if ( r_sc_to_ixr_cmd_req )       r_ixr_cmd_fsm = IXR_CMD_SC_NLINE;
        else if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA;
        else if ( r_read_to_ixr_cmd_req )     r_ixr_cmd_fsm = IXR_CMD_READ_NLINE;
        break;
    ////////////////////////
    case IXR_CMD_WRITE_IDLE:  // round-robin: WRITE was served last
        if ( r_sc_to_ixr_cmd_req )            r_ixr_cmd_fsm = IXR_CMD_SC_NLINE;
        else if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA;
        else if ( r_read_to_ixr_cmd_req )     r_ixr_cmd_fsm = IXR_CMD_READ_NLINE;
        else if ( r_write_to_ixr_cmd_req )    r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE;
        break;
    ////////////////////////
    case IXR_CMD_SC_IDLE:     // round-robin: SC was served last
        if ( r_xram_rsp_to_ixr_cmd_req )      r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA;
        else if ( r_read_to_ixr_cmd_req )     r_ixr_cmd_fsm = IXR_CMD_READ_NLINE;
        else if ( r_write_to_ixr_cmd_req )    r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE;
        else if ( r_sc_to_ixr_cmd_req )       r_ixr_cmd_fsm = IXR_CMD_SC_NLINE;
        break;
    ////////////////////////
    case IXR_CMD_XRAM_IDLE:   // round-robin: XRAM_RSP was served last
        if ( r_read_to_ixr_cmd_req )          r_ixr_cmd_fsm = IXR_CMD_READ_NLINE;
        else if ( r_write_to_ixr_cmd_req )    r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE;
        else if ( r_sc_to_ixr_cmd_req )       r_ixr_cmd_fsm = IXR_CMD_SC_NLINE;
        else if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA;
        break;
    /////////////////////////
    // send a get request to XRAM : a single-cell read command, consumed
    // (and the READ FSM request flip-flop cleared) when the XRAM network
    // acknowledges the command cell.
    case IXR_CMD_READ_NLINE:
        if ( p_vci_ixr.cmdack )
        {
            r_ixr_cmd_fsm = IXR_CMD_READ_IDLE;
            r_read_to_ixr_cmd_req = false;

#if DEBUG_MEMC_IXR_CMD
if( m_debug_ixr_cmd_fsm )
{
    std::cout << " Send a get request to xram" << std::endl;
}
#endif
        }
        break;
    //////////////////////////
    case IXR_CMD_WRITE_NLINE: // send a put or get command to XRAM
                              // (the WRITE FSM request carries a write flag:
                              // a put is m_words cells, a get is one cell)
        if ( p_vci_ixr.cmdack )
        {
            if( r_write_to_ixr_cmd_write.read())        // put : one cell per word
            {
                if ( r_ixr_cmd_cpt.read() == (m_words - 1) )  // last word sent
                {
                    r_ixr_cmd_cpt = 0;
                    r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE;
                    r_write_to_ixr_cmd_req = false;
                }
                else
                {
                    r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1;  // next word of the line
                }

#if DEBUG_MEMC_IXR_CMD
if( m_debug_ixr_cmd_fsm )
{
    std::cout << " Send a put request to xram" << std::endl;
}
#endif
            }
            else                                        // get : single cell
            {
                r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE;
                r_write_to_ixr_cmd_req = false;

#if DEBUG_MEMC_IXR_CMD
if( m_debug_ixr_cmd_fsm )
{
    std::cout << " Send a get request to xram" << std::endl;
}
#endif
            }
        }
        break;
    //////////////////////
    case IXR_CMD_SC_NLINE: // send a put or get command to XRAM
                           // (same protocol as WRITE_NLINE, for the SC FSM)
        if ( p_vci_ixr.cmdack )
        {
            if( r_sc_to_ixr_cmd_write.read())           // put : one cell per word
            {
                if ( r_ixr_cmd_cpt.read() == (m_words - 1) )  // last word sent
                {
                    r_ixr_cmd_cpt = 0;
                    r_ixr_cmd_fsm = IXR_CMD_SC_IDLE;
                    r_sc_to_ixr_cmd_req = false;
                }
                else
                {
                    r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1;  // next word of the line
                }

#if DEBUG_MEMC_IXR_CMD
if( m_debug_ixr_cmd_fsm )
{
    std::cout << " Send a put request to xram" << std::endl;
}
#endif
            }
            else                                        // get : single cell
            {
                r_ixr_cmd_fsm = IXR_CMD_SC_IDLE;
                r_sc_to_ixr_cmd_req = false;

#if DEBUG_MEMC_IXR_CMD
if( m_debug_ixr_cmd_fsm )
{
    std::cout << " Send a get request to xram" << std::endl;
}
#endif
            }
        }
        break;
    ////////////////////////
    case IXR_CMD_XRAM_DATA: // send a put command to XRAM
                            // (victim line save requested by XRAM_RSP FSM;
                            // always a put, one cell per word)
        if ( p_vci_ixr.cmdack )
        {
            if ( r_ixr_cmd_cpt.read() == (m_words - 1) )      // last word sent
            {
                r_ixr_cmd_cpt = 0;
                r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE;
                r_xram_rsp_to_ixr_cmd_req = false;
            }
            else
            {
                r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1;      // next word of the line
            }

#if DEBUG_MEMC_IXR_CMD
if( m_debug_ixr_cmd_fsm )
{
    std::cout << " Send a put request to xram" << std::endl;
}
#endif
        }
        break;
} // end switch r_ixr_cmd_fsm

////////////////////////////////////////////////////////////////////////////
//                IXR_RSP FSM
////////////////////////////////////////////////////////////////////////////
// The IXR_RSP FSM receives the response packets from the XRAM,
// for both put and get transactions.
//
// - A response to a put request is a single-cell VCI packet.
//   The Transaction Tab index is contained in the RTRDID field.
//   The FSM takes the lock protecting the TRT, and the corresponding
//   entry is erased.
//
// - A response to a get request is a multi-cell VCI packet.
//   The Transaction Tab index is contained in the RTRDID field.
//   The N cells contain the N words of the cache line in the RDATA field.
//   The FSM takes the lock protecting the TRT to store the line in the TRT
//   (taking into account the write requests already stored in the TRT).
//   When the line is completely written, the corresponding rok signal is set.
///////////////////////////////////////////////////////////////////////////////
switch ( r_ixr_rsp_fsm.read() )
{
    //////////////////
    case IXR_RSP_IDLE: // test if it's a get or a put transaction
    {
        if ( p_vci_ixr.rspval.read() )
        {
            r_ixr_rsp_cpt = 0;                              // word counter reset
            r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read();  // TRT index from RTRDID
            // single cell and no error on bit 0 => put response
            if ( p_vci_ixr.reop.read() && !(p_vci_ixr.rerror.read()&0x1)) // put transaction
            {
                r_ixr_rsp_fsm = IXR_RSP_ACK;

#if DEBUG_MEMC_IXR_RSP
if( m_debug_ixr_rsp_fsm )
{
    std::cout << " Response from XRAM to a put transaction" << std::endl;
}
#endif
            }
            else                                            // get transaction
            {
                r_ixr_rsp_fsm = IXR_RSP_TRT_READ;

#if DEBUG_MEMC_IXR_RSP
if( m_debug_ixr_rsp_fsm )
{
    std::cout << " Response from XRAM to a get transaction" << std::endl;
}
#endif
            }
        }
        break;
    }
    ////////////////////////
    case IXR_RSP_ACK: // Acknowledge the VCI response
                      // (the response cell stays valid until consumed here)
    {
        if(p_vci_ixr.rspval.read()) r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE;

#if DEBUG_MEMC_IXR_RSP
if( m_debug_ixr_rsp_fsm )
{
    std::cout << " " << std::endl;
}
#endif
        break;
    }
    ////////////////////////
    case IXR_RSP_TRT_ERASE: // erase the entry in the TRT
                            // (requires the TRT lock, granted when the
                            // ALLOC_TRT FSM points to IXR_RSP)
    {
        if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP )
        {
            m_transaction_tab.erase(r_ixr_rsp_trt_index.read());
            r_ixr_rsp_fsm = IXR_RSP_IDLE;

#if DEBUG_MEMC_IXR_RSP
if( m_debug_ixr_rsp_fsm )
{
    std::cout << " Erase TRT entry " << r_ixr_rsp_trt_index.read() << std::endl;
}
#endif
        }
        break;
    }
    ///////////////////////
    case IXR_RSP_TRT_READ: // write data in the TRT
                           // (one response cell / one line word per cycle,
                           // while both the TRT lock and rspval are present)
    {
        if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && p_vci_ixr.rspval )
        {
            size_t index = r_ixr_rsp_trt_index.read();
            bool   eop   = p_vci_ixr.reop.read();
            data_t data  = p_vci_ixr.rdata.read();
            bool   error = ((p_vci_ixr.rerror.read() & 0x1) == 1);
            // a get response must carry exactly m_words cells (unless erroneous)
            assert(((eop == (r_ixr_rsp_cpt.read() == (m_words-1))) || p_vci_ixr.rerror.read())
                   and "Error in VCI_MEM_CACHE : invalid length for a response from XRAM");
            m_transaction_tab.write_rsp(index, r_ixr_rsp_cpt.read(), data, error);
            r_ixr_rsp_cpt = r_ixr_rsp_cpt.read() + 1;
            if ( eop )      // last word : signal line availability to XRAM_RSP FSM
            {
                r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true;
                r_ixr_rsp_fsm = IXR_RSP_IDLE;
            }

#if DEBUG_MEMC_IXR_RSP
if( m_debug_ixr_rsp_fsm )
{
    std::cout << " Writing a word in TRT : "
              << " index = " << std::dec << index
              << " / word = " << r_ixr_rsp_cpt.read()
              << " / data = " << std::hex << data << std::endl;
}
#endif
        }
        break;
    }
} // end switch r_ixr_rsp_fsm

////////////////////////////////////////////////////////////////////////////
//                XRAM_RSP FSM
////////////////////////////////////////////////////////////////////////////
// The XRAM_RSP FSM handles the incoming cache lines from the XRAM.
// The cache line has been written in the TRT by the IXR_CMD_FSM.
// As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel,
// there are as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i]
// as the number of entries in the TRT, that are handled with
// a round-robin priority...
//
// When a response is available, the corresponding TRT entry
// must be copied in a local buffer to be written in the cache.
// The FSM takes the lock protecting the TRT, and the lock protecting the DIR.
// It selects a cache slot and writes the line in the cache.
// If it was a read MISS, the XRAM_RSP FSM sends a request to the TGT_RSP
// FSM to return the cache line to the registered processor.
// If there is no empty slot, a victim line is evicted, and
// invalidate requests are sent to the L1 caches containing copies.
// If this line is dirty, the XRAM_RSP FSM sends a request to the IXR_CMD
// FSM to save the victim line to the XRAM, and registers the write transaction
// in the TRT (using the entry previously used by the read transaction).
///////////////////////////////////////////////////////////////////////////////
switch ( r_xram_rsp_fsm.read() )
{
    ///////////////////
    case XRAM_RSP_IDLE: // scan the XRAM responses to get the TRT index (round robin)
    {
        size_t ptr   = r_xram_rsp_trt_index.read();
        size_t lines = m_transaction_tab_lines;
        // NOTE(review): source corrupted here -- the body of the round-robin
        // scan loop and part of the debug trace were lost when text between
        // '<' and '>' was stripped; TODO restore from version control.
        for( size_t i=0 ; i Available cache line in TRT:" << " index = " << std::dec << index << std::endl; } #endif break; } } break; }
    ///////////////////////
    case XRAM_RSP_DIR_LOCK: // Takes the lock on the directory
    {
        if( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP )
        {
            r_xram_rsp_fsm = XRAM_RSP_TRT_COPY;

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Get access to directory" << std::endl;
}
#endif
        }
        break;
    }
    ///////////////////////
    case XRAM_RSP_TRT_COPY: // Takes the lock on TRT
                            // Copy the TRT entry in a local buffer
                            // and select a victim cache line
    {
        if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) )
        {
            // copy the TRT entry in the r_xram_rsp_trt_buf local buffer
            size_t index = r_xram_rsp_trt_index.read();
            TransactionTabEntry trt_entry(m_transaction_tab.read(index));
            r_xram_rsp_trt_buf.copy(trt_entry);  // TRT entry local buffer

            // selects & extracts a victim line from cache
            size_t way = 0;
            size_t set = m_y[(vci_addr_t)(trt_entry.nline * m_words * 4)];
            DirectoryEntry victim(m_cache_directory.select(set, way));
            // an invalidation is required if the victim has registered copies
            bool inval = (victim.count && victim.valid) ;

            // copy the victim line in a local buffer
            // NOTE(review): source corrupted here -- the victim-copy loop body,
            // the victim-state registration and part of the debug trace were lost.
            for (size_t i=0 ; i Select a slot: " << " way = " << std::dec << way << " / set = " << set << " / inval_required = " << inval << std::endl; } #endif } break; }
    /////////////////////////
    case XRAM_RSP_INVAL_LOCK: // check a possible pending inval
    {
        if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP )
        {
            size_t index;
            if (m_update_tab.search_inval(r_xram_rsp_trt_buf.nline, index))
            {
                // an invalidation for the same line is already in flight: retry
                r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT;

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Get acces to UPT,"
              << " but an invalidation is already registered at this address" << std::endl;
    m_update_tab.print();
}
#endif
            }
            else if (m_update_tab.is_full() && r_xram_rsp_victim_inval.read())
            {
                // no free UPT entry for the required victim invalidation: retry
                r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT;

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Get acces to UPT,"
              << " but the table is full" << std::endl;
    m_update_tab.print();
}
#endif
            }
            else
            {
                r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT;

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Get acces to UPT" << std::endl;
}
#endif
            }
        }
        break;
    }
    /////////////////////////
    case XRAM_RSP_INVAL_WAIT: // returns to DIR_LOCK to retry
    {
        r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK;
        break;
    }
    ///////////////////////
    case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory)
                            // and possibly set an inval request in UPT
    {
        // signals generation: trdid bit 1 = instruction read, bit 0 = cachable read
        bool inst_read   = (r_xram_rsp_trt_buf.trdid & 0x2) && r_xram_rsp_trt_buf.proc_read;
        bool cached_read = (r_xram_rsp_trt_buf.trdid & 0x1) && r_xram_rsp_trt_buf.proc_read;

        // update data
        size_t set = r_xram_rsp_victim_set.read();
        size_t way = r_xram_rsp_victim_way.read();
        // NOTE(review): source corrupted here -- the data-update loop, the new
        // directory-entry construction and part of the debug trace were lost.
        for(size_t i=0; i Directory update: " << " way = " << std::dec << way << " / set = " << set << " / count = " << entry.count << " / is_cnt = " << entry.is_cnt << std::endl; if (r_xram_rsp_victim_inval.read()) std::cout << " Invalidation request for victim line " << std::hex << r_xram_rsp_victim_nline.read() << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; } #endif
        // If the victim is not dirty, we don't need another XRAM put transaction,
        // and we can erase the TRT entry
        if (!r_xram_rsp_victim_dirty.read())  m_transaction_tab.erase(r_xram_rsp_trt_index.read());

        // Next state
        if      ( r_xram_rsp_victim_dirty.read())  r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY;
        else if ( r_xram_rsp_trt_buf.proc_read )   r_xram_rsp_fsm = XRAM_RSP_DIR_RSP;
        else if ( r_xram_rsp_victim_inval.read())  r_xram_rsp_fsm = XRAM_RSP_INVAL;
        else                                       r_xram_rsp_fsm = XRAM_RSP_IDLE;
        break;
    }
    ////////////////////////
    case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write to XRAM) if the victim is dirty
    {
        if ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP )
        {
            // NOTE(review): the template arguments of the two std::vector
            // constructor calls below were lost in the source mangling.
            m_transaction_tab.set( r_xram_rsp_trt_index.read(),
                                   false,                           // write to XRAM
                                   r_xram_rsp_victim_nline.read(),  // line index
                                   0,
                                   0,
                                   0,
                                   false,
                                   0,
                                   0,
                                   std::vector(m_words,0),
                                   std::vector(m_words,0) );

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Set TRT entry for the put transaction:"
              << " dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl;
}
#endif
            if      ( r_xram_rsp_trt_buf.proc_read )   r_xram_rsp_fsm = XRAM_RSP_DIR_RSP;
            else if ( r_xram_rsp_victim_inval.read())  r_xram_rsp_fsm = XRAM_RSP_INVAL;
            else                                       r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY;
        }
        break;
    }
    //////////////////////
    case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM
    {
        if ( !r_xram_rsp_to_tgt_rsp_req.read() )
        {
            r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid;
            r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid;
            r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid;
            for (size_t i=0; i < m_words; i++) r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i];
            r_xram_rsp_to_tgt_rsp_word   = r_xram_rsp_trt_buf.word_index;
            r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length;
            r_xram_rsp_to_tgt_rsp_rerror = false;
            r_xram_rsp_to_tgt_rsp_req    = true;

            if      ( r_xram_rsp_victim_inval ) r_xram_rsp_fsm = XRAM_RSP_INVAL;
            else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY;
            else                                r_xram_rsp_fsm = XRAM_RSP_IDLE;

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Request the TGT_RSP FSM to return data:"
              << " rsrcid = " << std::dec << r_xram_rsp_trt_buf.srcid
              << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4
              << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl;
}
#endif
        }
        break;
    }
    ////////////////////
    case XRAM_RSP_INVAL: // send invalidate request to INIT_CMD FSM
    {
        if( !r_xram_rsp_to_init_cmd_multi_req.read() &&
            !r_xram_rsp_to_init_cmd_brdcast_req.read() )
        {
            // multi-inval when the victim is not in counter mode,
            // broadcast-inval otherwise
            bool multi_req          = !r_xram_rsp_victim_is_cnt.read();
            bool last_multi_req     = multi_req && (r_xram_rsp_victim_count.read() == 1);
            bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1);

            r_xram_rsp_to_init_cmd_multi_req   = last_multi_req;
            r_xram_rsp_to_init_cmd_brdcast_req = r_xram_rsp_victim_is_cnt.read();
            r_xram_rsp_to_init_cmd_nline       = r_xram_rsp_victim_nline.read();
            r_xram_rsp_to_init_cmd_trdid       = r_xram_rsp_upt_index;
            xram_rsp_to_init_cmd_fifo_srcid    = r_xram_rsp_victim_copy.read();
            xram_rsp_to_init_cmd_fifo_inst     = r_xram_rsp_victim_copy_inst.read();
#if L1_MULTI_CACHE
            xram_rsp_to_init_cmd_fifo_cache_id = r_xram_rsp_victim_copy_cache.read();
#endif
            xram_rsp_to_init_cmd_fifo_put      = multi_req;
            r_xram_rsp_next_ptr                = r_xram_rsp_victim_ptr.read();

            if      ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY;
            else if (not_last_multi_req)        r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE;
            else                                r_xram_rsp_fsm = XRAM_RSP_IDLE;

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Send an inval request to INIT_CMD FSM:"
              << " victim line = " << r_xram_rsp_victim_nline.read() << std::endl;
}
#endif
        }
        break;
    }
    //////////////////////////
    case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM
    {
        if ( !r_xram_rsp_to_ixr_cmd_req.read() )
        {
            r_xram_rsp_to_ixr_cmd_req   = true;
            r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read();
            r_xram_rsp_to_ixr_cmd_trdid = r_xram_rsp_trt_index.read();
            // NOTE(review): source corrupted here -- the data-copy loop body,
            // the next-state selection and part of the debug trace were lost.
            for(size_t i=0; i Send the put request to IXR_CMD FSM:" << " victim line = " << r_xram_rsp_victim_nline.read() << std::endl; } #endif } break; }
    /////////////////////////
    case XRAM_RSP_HEAP_ERASE: // erase the list of copies and send invalidations
    {
        if( r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP )
        {
            HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read());
            xram_rsp_to_init_cmd_fifo_srcid    = entry.owner.srcid;
#if L1_MULTI_CACHE
            xram_rsp_to_init_cmd_fifo_cache_id = entry.owner.cache_id;
#endif
            xram_rsp_to_init_cmd_fifo_inst     = entry.owner.inst;
            xram_rsp_to_init_cmd_fifo_put      = true;
            if( m_xram_rsp_to_init_cmd_inst_fifo.wok() )
            {
                r_xram_rsp_next_ptr = entry.next;
                // a heap entry pointing to itself marks the end of the list
                if( entry.next == r_xram_rsp_next_ptr.read() ) // last copy
                {
                    r_xram_rsp_to_init_cmd_multi_req = true;
                    r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST;
                }
                else
                {
                    r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE;
                }
            }
            else
            {
                r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE;
            }

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Erase the list of copies:"
              << " srcid = " << std::dec << entry.owner.srcid
              << " / inst = " << std::dec << entry.owner.inst << std::endl;
}
#endif
        }
        break;
    }
    /////////////////////////
    case XRAM_RSP_HEAP_LAST: // last member of the list: return the freed
                             // entries to the heap free list
    {
        if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP )
        {
            std::cout << "VCI_MEM_CACHE ERROR " << name()
                      << " XRAM_RSP_HEAP_LAST state" << std::endl;
            std::cout << "bad HEAP allocation" << std::endl;
            exit(0);
        }
        size_t free_pointer = m_heap.next_free_ptr();

        HeapEntry last_entry;
        last_entry.owner.srcid    = 0;
#if L1_MULTI_CACHE
        last_entry.owner.cache_id = 0;
#endif
        last_entry.owner.inst     = false;
        if(m_heap.is_full())
        {
            last_entry.next = r_xram_rsp_next_ptr.read();
            m_heap.unset_full();
        }
        else
        {
            last_entry.next = free_pointer;
        }

        m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read());
        m_heap.write(r_xram_rsp_next_ptr.read(),last_entry);
        r_xram_rsp_fsm = XRAM_RSP_IDLE;

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Heap housekeeping" << std::endl;
}
#endif
        break;
    }
    // ///////////////////////
    case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error
    {
        m_transaction_tab.erase(r_xram_rsp_trt_index.read());

        // Next state
        if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP;
        else                                r_xram_rsp_fsm = XRAM_RSP_IDLE;

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Error reported by XRAM / erase the TRT entry" << std::endl;
}
#endif
        break;
    }
    ////////////////////////
    case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM
    {
        if ( !r_xram_rsp_to_tgt_rsp_req.read() )
        {
            r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid;
            r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid;
            r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid;
            for (size_t i=0; i < m_words; i++) r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i];
            r_xram_rsp_to_tgt_rsp_word   = r_xram_rsp_trt_buf.word_index;
            r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length;
            r_xram_rsp_to_tgt_rsp_rerror = true;   // error flag set in the response
            r_xram_rsp_to_tgt_rsp_req    = true;
            r_xram_rsp_fsm = XRAM_RSP_IDLE;

#if DEBUG_MEMC_XRAM_RSP
if( m_debug_xram_rsp_fsm )
{
    std::cout << " Request a response error to TGT_RSP FSM:"
              << " srcid = " << std::dec << r_xram_rsp_trt_buf.srcid << std::endl;
}
#endif
        }
        break;
    }
} // end switch r_xram_rsp_fsm

////////////////////////////////////////////////////////////////////////////////////
// CLEANUP FSM
////////////////////////////////////////////////////////////////////////////////////
// The CLEANUP FSM handles the cleanup request from L1 caches.
// It accesses the cache directory and the heap to update the list of copies.
////////////////////////////////////////////////////////////////////////////////////
switch ( r_cleanup_fsm.read() )
{
    //////////////////
    case CLEANUP_IDLE: // wait for a cleanup command on the coherence network
    {
        if ( p_vci_tgt_cleanup.cmdval.read() )
        {
            if (p_vci_tgt_cleanup.srcid.read() >= m_initiators )
            {
                std::cout << "VCI_MEM_CACHE ERROR " << name()
                          << " CLEANUP_IDLE state" << std::endl;
                std::cout << "illegal srcid for cleanup request" << std::endl;
                exit(0);
            }

            // check that the address is mapped in one of the cleanup segments
            bool reached = false;
            for ( size_t index = 0 ; index < m_ncseg && !reached ; index++ )
            {
                if ( m_cseg[index]->contains((addr_t)(p_vci_tgt_cleanup.address.read())) )
                    reached = true;
            }
            // only write request to a mapped address that are not broadcast are handled
            if ( (p_vci_tgt_cleanup.cmd.read() == vci_param::CMD_WRITE) &&
                 ((p_vci_tgt_cleanup.address.read() & 0x3) == 0) && reached)
            {
                // the line index is carried in the BE (MSB part) and WDATA (LSB part) fields
                addr_t line =(((addr_t) p_vci_tgt_cleanup.be.read() << (vci_param::B*8))) |
                             (((addr_t) p_vci_tgt_cleanup.wdata.read()));

                r_cleanup_nline = line;
                r_cleanup_srcid = p_vci_tgt_cleanup.srcid.read();
                r_cleanup_trdid = p_vci_tgt_cleanup.trdid.read();
                r_cleanup_pktid = p_vci_tgt_cleanup.pktid.read();
                r_cleanup_fsm   = CLEANUP_DIR_LOCK;

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Cleanup request:" << std::hex
              << " line = " << line * m_words * 4
              << " / owner_id = " << p_vci_tgt_cleanup.srcid.read()
              << " / owner_ins = " << (p_vci_tgt_cleanup.trdid.read()&0x1) << std::endl;
}
#endif
                m_cpt_cleanup++;
            }
        }
        break;
    }
    //////////////////////
    case CLEANUP_DIR_LOCK: // test directory status
    {
        if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP )
        {
            // Read the directory
            size_t way = 0;
            addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4;
            DirectoryEntry entry   = m_cache_directory.read(cleanup_address , way);
            r_cleanup_is_cnt    = entry.is_cnt;
            r_cleanup_dirty     = entry.dirty;
            r_cleanup_tag       = entry.tag;
            r_cleanup_lock      = entry.lock;
            r_cleanup_way       = way;
            r_cleanup_copy      = entry.owner.srcid;
#if L1_MULTI_CACHE
            r_cleanup_copy_cache= entry.owner.cache_id;
#endif
            r_cleanup_copy_inst = entry.owner.inst;
            r_cleanup_count     = entry.count;
            r_cleanup_ptr       = entry.ptr;

            if( entry.valid)                               // hit : the copy must be cleared
            {
                if ( (entry.count==1) || (entry.is_cnt) )  // no access to the heap
                {
                    r_cleanup_fsm = CLEANUP_DIR_WRITE;
                }
                else                                       // access to the heap
                {
                    r_cleanup_fsm = CLEANUP_HEAP_LOCK;
                }
            }
            else                                           // miss : we must check the update table
            {
                r_cleanup_fsm = CLEANUP_UPT_LOCK;
            }

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Test directory status: " << std::hex
              << " line = " << r_cleanup_nline.read() * m_words * 4
              << " / hit = " << entry.valid
              << " / dir_id = " << entry.owner.srcid
              << " / dir_ins = " << entry.owner.inst
              << " / search_id = " << r_cleanup_srcid.read()
              << " / search_ins = " << (r_cleanup_trdid.read()&0x1)
              << " / count = " << entry.count
              << " / is_cnt = " << entry.is_cnt << std::endl;
}
#endif
        }
        break;
    }
    ///////////////////////
    case CLEANUP_DIR_WRITE: // update the directory entry without heap access
    {
        if ( r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP )
        {
            std::cout << "VCI_MEM_CACHE ERROR " << name()
                      << " CLEANUP_DIR_WRITE state"
                      << " bad DIR allocation" << std::endl;
            exit(0);
        }

        size_t way        = r_cleanup_way.read();
        size_t set        = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)];
        bool cleanup_inst = r_cleanup_trdid.read() & 0x1;
        bool match_srcid  = ((r_cleanup_copy.read() == r_cleanup_srcid.read())
#if L1_MULTI_CACHE
                             and (r_cleanup_copy_cache.read() == r_cleanup_pktid.read())
#endif
                            );
        bool match_inst   = (r_cleanup_copy_inst.read() == cleanup_inst);
        bool match        = match_srcid && match_inst;

        // update the cache directory (for the copies)
        DirectoryEntry entry;
        entry.valid  = true;
        entry.is_cnt = r_cleanup_is_cnt.read();
        entry.dirty  = r_cleanup_dirty.read();
        entry.tag    = r_cleanup_tag.read();
        entry.lock   = r_cleanup_lock.read();
        entry.ptr    = r_cleanup_ptr.read();

        if ( r_cleanup_is_cnt.read() )  // counter mode
        {
            entry.count         = r_cleanup_count.read() -1;
            entry.owner.srcid   = 0;
#if L1_MULTI_CACHE
            entry.owner.cache_id= 0;
#endif
            entry.owner.inst    = 0;
            // response to the cache
            r_cleanup_fsm = CLEANUP_RSP;
        }
        else                            // linked_list mode
        {
            if ( match )  // hit
            {
                entry.count         = 0; // no more copy
                entry.owner.srcid   = 0;
#if L1_MULTI_CACHE
                entry.owner.cache_id=0;
#endif
                entry.owner.inst    = 0;
                r_cleanup_fsm       = CLEANUP_RSP;
            }
            else          // miss
            {
                entry.count          = r_cleanup_count.read();
                entry.owner.srcid    = r_cleanup_copy.read();
#if L1_MULTI_CACHE
                entry.owner.cache_id = r_cleanup_copy_cache.read();
#endif
                entry.owner.inst     = r_cleanup_copy_inst.read();
                r_cleanup_fsm        = CLEANUP_UPT_LOCK;
            }
        }
        m_cache_directory.write(set, way, entry);

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Update directory:" << std::hex
              << " line = " << r_cleanup_nline.read() * m_words * 4
              << " / dir_id = " << entry.owner.srcid
              << " / dir_ins = " << entry.owner.inst
              << " / count = " << entry.count
              << " / is_cnt = " << entry.is_cnt << std::endl;
}
#endif
        break;
    }
    ///////////////////////
    case CLEANUP_HEAP_LOCK: // two cases are handled in this state:
                            // - the matching copy is directly in the directory
                            // - the matching copy is the first copy in the heap
    {
        if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP )
        {
            size_t way           = r_cleanup_way.read();
            size_t set           = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)];
            HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read());
            // a heap entry pointing to itself marks the end of the list
            bool last            = (heap_entry.next == r_cleanup_ptr.read());
            bool cleanup_inst    = r_cleanup_trdid.read() & 0x1;

            // match_dir computation
            bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read());
            bool match_dir_inst  = (r_cleanup_copy_inst.read() == cleanup_inst);
            bool match_dir       = match_dir_srcid and match_dir_inst;
#if L1_MULTI_CACHE
            match_dir = match_dir and (r_cleanup_copy_cache.read() == r_cleanup_pktid.read());
#endif

            // match_heap computation
            bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read());
            bool match_heap_inst  = (heap_entry.owner.inst == cleanup_inst);
            bool match_heap       = match_heap_srcid and match_heap_inst;
#if L1_MULTI_CACHE
            match_heap = match_heap and (heap_entry.owner.cache_id == r_cleanup_pktid.read());
#endif

            r_cleanup_prev_ptr      = r_cleanup_ptr.read();
            r_cleanup_prev_srcid    = heap_entry.owner.srcid;
#if L1_MULTI_CACHE
            r_cleanup_prev_cache_id = heap_entry.owner.cache_id;
#endif
            r_cleanup_prev_inst     = heap_entry.owner.inst;

            if (match_dir) // the matching copy is registered in the directory
            {
                // the copy registered in the directory must be replaced
                // by the first copy registered in the heap
                // and the corresponding entry must be freed
                DirectoryEntry dir_entry;
                dir_entry.valid          = true;
                dir_entry.is_cnt         = r_cleanup_is_cnt.read();
                dir_entry.dirty          = r_cleanup_dirty.read();
                dir_entry.tag            = r_cleanup_tag.read();
                dir_entry.lock           = r_cleanup_lock.read();
                dir_entry.ptr            = heap_entry.next;
                dir_entry.count          = r_cleanup_count.read()-1;
                dir_entry.owner.srcid    = heap_entry.owner.srcid;
#if L1_MULTI_CACHE
                dir_entry.owner.cache_id = heap_entry.owner.cache_id;
#endif
                dir_entry.owner.inst     = heap_entry.owner.inst;
                m_cache_directory.write(set,way,dir_entry);
                r_cleanup_next_ptr       = r_cleanup_ptr.read();
                r_cleanup_fsm            = CLEANUP_HEAP_FREE;
            }
            else if (match_heap) // the matching copy is the first copy in the heap
            {
                // The first copy in heap must be freed
                // and the copy registered in directory must point to the next copy in heap
                DirectoryEntry dir_entry;
                dir_entry.valid          = true;
                dir_entry.is_cnt         = r_cleanup_is_cnt.read();
                dir_entry.dirty          = r_cleanup_dirty.read();
                dir_entry.tag            = r_cleanup_tag.read();
                dir_entry.lock           = r_cleanup_lock.read();
                dir_entry.ptr            = heap_entry.next;
                dir_entry.count          = r_cleanup_count.read()-1;
                dir_entry.owner.srcid    = r_cleanup_copy.read();
#if L1_MULTI_CACHE
                dir_entry.owner.cache_id = r_cleanup_copy_cache.read();
#endif
                dir_entry.owner.inst     = r_cleanup_copy_inst.read();
                m_cache_directory.write(set,way,dir_entry);
                r_cleanup_next_ptr       = r_cleanup_ptr.read();
                r_cleanup_fsm            = CLEANUP_HEAP_FREE;
            }
            else if(!last) // The matching copy is in the heap, but is not the first copy
            {
                // The directory entry must be modified to decrement count
                DirectoryEntry dir_entry;
                dir_entry.valid          = true;
                dir_entry.is_cnt         = r_cleanup_is_cnt.read();
                dir_entry.dirty          = r_cleanup_dirty.read();
                dir_entry.tag            = r_cleanup_tag.read();
                dir_entry.lock           = r_cleanup_lock.read();
                dir_entry.ptr            = r_cleanup_ptr.read();
                dir_entry.count          = r_cleanup_count.read()-1;
                dir_entry.owner.srcid    = r_cleanup_copy.read();
#if L1_MULTI_CACHE
                dir_entry.owner.cache_id = r_cleanup_copy_cache.read();
#endif
                dir_entry.owner.inst     = r_cleanup_copy_inst.read();
                m_cache_directory.write(set,way,dir_entry);
                r_cleanup_next_ptr       = heap_entry.next;
                r_cleanup_fsm            = CLEANUP_HEAP_SEARCH;
            }
            else
            {
                std::cout << "VCI_MEM_CACHE ERROR " << name()
                          << " CLEANUP_HEAP_LOCK state"
                          << " hit but copy not found" << std::endl;
                exit(0);
            }

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Checks matching:"
              << " line = " << r_cleanup_nline.read() * m_words * 4
              << " / dir_id = " << r_cleanup_copy.read()
              << " / dir_ins = " << r_cleanup_copy_inst.read()
              << " / heap_id = " << heap_entry.owner.srcid
              << " / heap_ins = " << heap_entry.owner.inst
              << " / search_id = " << r_cleanup_srcid.read()
              << " / search_ins = " << (r_cleanup_trdid.read()&0x1) << std::endl;
}
#endif
        }
        break;
    }
    /////////////////////////
    case CLEANUP_HEAP_SEARCH: // This state is handling the case where the copy
                              // is in the heap, but is not the first in the linked list
    {
        if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP )
        {
            std::cout << "VCI_MEM_CACHE ERROR " << name()
                      << " CLEANUP_HEAP_SEARCH state"
                      << " bad HEAP allocation" << std::endl;
            exit(0);
        }

        HeapEntry heap_entry  = m_heap.read(r_cleanup_next_ptr.read());
        bool last             = (heap_entry.next == r_cleanup_next_ptr.read());
        bool cleanup_inst     = r_cleanup_trdid.read() & 0x1;
        bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read());
        bool match_heap_inst  = (heap_entry.owner.inst == cleanup_inst);
        bool match_heap       = match_heap_srcid && match_heap_inst;
#if L1_MULTI_CACHE
        match_heap = match_heap and (heap_entry.owner.cache_id == r_cleanup_pktid.read());
#endif

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Cheks matching:"
              << " line = " << r_cleanup_nline.read() * m_words * 4
              << " / heap_id = " << heap_entry.owner.srcid
              << " / heap_ins = " << heap_entry.owner.inst
              << " / search_id = " << r_cleanup_srcid.read()
              << " / search_ins = " << (r_cleanup_trdid.read()&0x1)
              << " / last = " << last << std::endl;
}
#endif
        if(match_heap) // the matching copy must be removed
        {
            r_cleanup_ptr = heap_entry.next; // reuse resources
            r_cleanup_fsm = CLEANUP_HEAP_CLEAN;
        }
        else
        {
            if ( last )
            {
                std::cout << "VCI_MEM_CACHE_ERROR " << name()
                          << " CLEANUP_HEAP_SEARCH state"
                          << " cleanup hit but copy not found" << std::endl;
                exit(0);
            }
            else // test the next in the linked list
            {
                r_cleanup_prev_ptr      = r_cleanup_next_ptr.read();
                r_cleanup_prev_srcid    = heap_entry.owner.srcid;
#if L1_MULTI_CACHE
                r_cleanup_prev_cache_id = heap_entry.owner.cache_id;
#endif
                r_cleanup_prev_inst     = heap_entry.owner.inst;
                r_cleanup_next_ptr      = heap_entry.next;
                r_cleanup_fsm           = CLEANUP_HEAP_SEARCH;

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Matching copy not found, search next:"
              << " line = " << r_cleanup_nline.read() * m_words * 4
              << " / heap_id = " << heap_entry.owner.srcid
              << " / heap_ins = " << heap_entry.owner.inst
              << " / search_id = " << r_cleanup_srcid.read()
              << " / search_ins = " << (r_cleanup_trdid.read()&0x1) << std::endl;
}
#endif
            }
        }
        break;
    }
    ////////////////////////
    case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list
    {
        if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP )
        {
            std::cout << "VCI_MEM_CACHE ERROR " << name()
                      << " CLEANUP_HEAP_CLEAN state"
                      << "Bad HEAP allocation" << std::endl;
            exit(0);
        }

        bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read());
        HeapEntry heap_entry;
        heap_entry.owner.srcid    = r_cleanup_prev_srcid.read();
#if L1_MULTI_CACHE
        heap_entry.owner.cache_id = r_cleanup_prev_cache_id.read();
#endif
        heap_entry.owner.inst     = r_cleanup_prev_inst.read();
        if(last) // this is the last entry of the list of copies
        {
            heap_entry.next = r_cleanup_prev_ptr.read();
        }
        else // this is not the last entry
        {
            heap_entry.next = r_cleanup_ptr.read();
        }
        m_heap.write(r_cleanup_prev_ptr.read(),heap_entry);
        r_cleanup_fsm = CLEANUP_HEAP_FREE;

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Remove the copy in the linked list" << std::endl;
}
#endif
        break;
    }
    ///////////////////////
    case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed
                            // and becomes the head of the list of free entries
    {
        if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP )
        {
            std::cout << "VCI_MEM_CACHE ERROR " << name()
                      << " CLEANUP_HEAP_CLEAN state" << std::endl;
            std::cout << "Bad HEAP allocation" << std::endl;
            exit(0);
        }

        HeapEntry heap_entry;
        heap_entry.owner.srcid    = 0;
#if L1_MULTI_CACHE
        heap_entry.owner.cache_id = 0;
#endif
        heap_entry.owner.inst     = false;

        if(m_heap.is_full()) heap_entry.next = r_cleanup_next_ptr.read();
        else                 heap_entry.next = m_heap.next_free_ptr();

        m_heap.write(r_cleanup_next_ptr.read(),heap_entry);
        m_heap.write_free_ptr(r_cleanup_next_ptr.read());
        m_heap.unset_full();
        r_cleanup_fsm = CLEANUP_RSP;

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Update the list of free entries" << std::endl;
}
#endif
        break;
    }
    //////////////////////
    case CLEANUP_UPT_LOCK: // directory miss: check for a pending inval in UPT
    {
        if ( r_alloc_upt_fsm.read() == ALLOC_UPT_CLEANUP )
        {
            size_t index = 0;
            bool   hit_inval;
            hit_inval = m_update_tab.search_inval(r_cleanup_nline.read(),index);

            if ( !hit_inval ) // no pending inval
            {

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Unexpected cleanup with no corresponding UPT entry:"
              << " address = " << std::hex << (r_cleanup_nline.read()*4*m_words) << std::endl;
}
#endif
                r_cleanup_fsm = CLEANUP_RSP;
            }
            else // pending inval
            {
                r_cleanup_write_srcid = m_update_tab.srcid(index);
                r_cleanup_write_trdid = m_update_tab.trdid(index);
                r_cleanup_write_pktid = m_update_tab.pktid(index);
                r_cleanup_need_rsp    = m_update_tab.need_rsp(index);
                r_cleanup_fsm = CLEANUP_UPT_WRITE;
            }
            r_cleanup_index.write(index) ;
        }
        break;
    }
    ///////////////////////
    case CLEANUP_UPT_WRITE: // decrement response counter
    {
        size_t count = 0;
        m_update_tab.decrement(r_cleanup_index.read(), count);
        if ( count == 0 )  // last expected response: clear the UPT entry
        {
            m_update_tab.clear(r_cleanup_index.read());

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Decrement response counter in UPT:"
              << " UPT_index = " << r_cleanup_index.read()
              << " rsp_count = " << count << std::endl;
}
#endif
            if( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP ;
            else                            r_cleanup_fsm = CLEANUP_RSP;
        }
        else
        {
            r_cleanup_fsm = CLEANUP_RSP ;
        }
        break;
    }
    ///////////////////////
    case CLEANUP_WRITE_RSP: // Response to a previous write on the direct network
    {
        if( !r_cleanup_to_tgt_rsp_req.read() )
        {
            r_cleanup_to_tgt_rsp_req   = true;
            r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read();
            r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read();
            r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read();
            r_cleanup_fsm = CLEANUP_RSP;

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Send a response to a cleanup request:"
              << " rsrcid = " << std::dec << r_cleanup_write_srcid.read()
              << " / rtrdid = " << std::dec << r_cleanup_write_trdid.read() << std::endl;
}
#endif
        }
        break;
    }
    /////////////////
    case CLEANUP_RSP: // Response to a cleanup on the coherence network
    {
        if ( p_vci_tgt_cleanup.rspack.read() )
        {
            r_cleanup_fsm = CLEANUP_IDLE;

#if DEBUG_MEMC_CLEANUP
if( m_debug_cleanup_fsm )
{
    std::cout << " Send the response to a cleanup request:"
              << " rsrcid = " << std::dec << r_cleanup_write_srcid.read()
              << " / rtrdid = " << r_cleanup_write_trdid.read() << std::endl;
}
#endif
        }
        break;
    }
} // end switch cleanup fsm

////////////////////////////////////////////////////////////////////////////////////
// SC FSM
////////////////////////////////////////////////////////////////////////////////////
// The SC FSM handles the SC (Store Conditional) atomic commands,
// that are handled as "compare-and-swap" instructions.
//
// This command contains two or four flits:
// - In case of 32 bits atomic access, the first flit contains the value read
// by a previous LL instruction, the second flit contains the value to be written.
// - In case of 64 bits atomic access, the 2 first flits contains the value read
// by a previous LL instruction, the 2 next flits contains the value to be written.
//
// The target address is cachable. If it is replicated in other L1 caches
// than the writer, a coherence operation is done.
//
// It accesses the directory to check hit / miss.
// - In case of miss, the SC FSM must register a GET transaction in TRT.
// If a read transaction to the XRAM for this line already exists,
// or if the transaction table is full, it goes to the WAIT state
// to release the locks and try again. When the GET transaction has been
// launched, it goes to the WAIT state and try again.
// The SC request is not consumed in the FIFO until a HIT is obtained.
// - In case of hit...
/////////////////////////////////////////////////////////////////////////////////// switch ( r_sc_fsm.read() ) { ///////////// case SC_IDLE: // fill the local rdata buffers { if( m_cmd_sc_addr_fifo.rok() ) { #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " SC command: " << std::hex << " srcid = " << std::dec << m_cmd_sc_srcid_fifo.read() << " addr = " << std::hex << m_cmd_sc_addr_fifo.read() << " wdata = " << m_cmd_sc_wdata_fifo.read() << " eop = " << std::dec << m_cmd_sc_eop_fifo.read() << " cpt = " << std::dec << r_sc_cpt.read() << std::endl; } #endif if( m_cmd_sc_eop_fifo.read() ) { m_cpt_sc++; r_sc_fsm = SC_DIR_LOCK; } else // we keep the last word in the FIFO { cmd_sc_fifo_get = true; } // We fill the two buffers if ( r_sc_cpt.read() < 2 ) // 32 bits access r_sc_rdata[r_sc_cpt.read()] = m_cmd_sc_wdata_fifo.read(); if((r_sc_cpt.read() == 1) && m_cmd_sc_eop_fifo.read()) r_sc_wdata = m_cmd_sc_wdata_fifo.read(); if( r_sc_cpt.read()>3 ) // more than 4 flits... { std::cout << "VCI_MEM_CACHE ERROR in SC_IDLE state : illegal SC command" << std::endl; exit(0); } if ( r_sc_cpt.read()==2 ) r_sc_wdata = m_cmd_sc_wdata_fifo.read(); r_sc_cpt = r_sc_cpt.read()+1; } break; } ///////////////// case SC_DIR_LOCK: // Read the directory { if( r_alloc_dir_fsm.read() == ALLOC_DIR_SC ) { size_t way = 0; DirectoryEntry entry(m_cache_directory.read(m_cmd_sc_addr_fifo.read(), way)); r_sc_is_cnt = entry.is_cnt; r_sc_dirty = entry.dirty; r_sc_tag = entry.tag; r_sc_way = way; r_sc_copy = entry.owner.srcid; #if L1_MULTI_CACHE r_sc_copy_cache = entry.owner.cache_id; #endif r_sc_copy_inst = entry.owner.inst; r_sc_ptr = entry.ptr; r_sc_count = entry.count; if ( entry.valid ) r_sc_fsm = SC_DIR_HIT_READ; else r_sc_fsm = SC_MISS_TRT_LOCK; #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Directory acces" << " / address = " << std::hex << m_cmd_sc_addr_fifo.read() << " / hit = " << std::dec << entry.valid << " / count = " << entry.count << " / is_cnt = " << entry.is_cnt << std::endl; 
} #endif } break; } ///////////////////// case SC_DIR_HIT_READ: // update directory for lock and dirty bit // and check data change in cache { size_t way = r_sc_way.read(); size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; // update directory (lock & dirty bits) DirectoryEntry entry; entry.valid = true; entry.is_cnt = r_sc_is_cnt.read(); entry.dirty = true; entry.lock = true; entry.tag = r_sc_tag.read(); entry.owner.srcid = r_sc_copy.read(); #if L1_MULTI_CACHE entry.owner.cache_id = r_sc_copy_cache.read(); #endif entry.owner.inst = r_sc_copy_inst.read(); entry.count = r_sc_count.read(); entry.ptr = r_sc_ptr.read(); m_cache_directory.write(set, way, entry); // read data in cache & check data change bool ok = ( r_sc_rdata[0].read() == m_cache_data[way][set][word] ); if ( r_sc_cpt.read()==4 ) // 64 bits SC ok &= ( r_sc_rdata[1] == m_cache_data[way][set][word+1] ); // to avoid livelock, force the atomic access to fail pseudo-randomly bool forced_fail = ( (r_sc_lfsr % (64) == 0) && RANDOMIZE_SC ); r_sc_lfsr = (r_sc_lfsr >> 1) ^ ((-(r_sc_lfsr & 1)) & 0xd0000001); if( ok and not forced_fail ) // no data change { r_sc_fsm = SC_DIR_HIT_WRITE; } else // return failure { r_sc_fsm = SC_RSP_FAIL; } #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Test if SC success:" << " / expected value = " << r_sc_rdata[0].read() << " / actual value = " << m_cache_data[way][set][word] << " / forced_fail = " << forced_fail << std::endl; } #endif break; } ////////////////////// case SC_DIR_HIT_WRITE: // test if a CC transaction is required // write data in cache if no CC request { // test coherence request if(r_sc_count.read()) // replicated line { if ( r_sc_is_cnt.read() ) { r_sc_fsm = SC_BC_TRT_LOCK; // broadcast invalidate required } else if( !r_sc_to_init_cmd_multi_req.read() && !r_sc_to_init_cmd_brdcast_req.read() ) { r_sc_fsm = SC_UPT_LOCK; // multi update required } else { r_sc_fsm = SC_WAIT; } } else // no 
copies { size_t way = r_sc_way.read(); size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; // cache update m_cache_data[way][set][word] = r_sc_wdata.read(); if(r_sc_cpt.read()==4) m_cache_data[way][set][word+1] = m_cmd_sc_wdata_fifo.read(); // monitor if ( m_monitor_ok ) { vci_addr_t address = m_cmd_sc_addr_fifo.read(); char buf[80]; snprintf(buf, 80, "SC_DIR_HIT_WRITE srcid %d", m_cmd_sc_srcid_fifo.read()); check_monitor( buf, address, r_sc_wdata.read() ); if ( r_sc_cpt.read()==4 ) check_monitor( buf, address+4, m_cmd_sc_wdata_fifo.read() ); } r_sc_fsm = SC_RSP_SUCCESS; #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Update cache:" << " way = " << std::dec << way << " / set = " << set << " / word = " << word << " / value = " << r_sc_wdata.read() << " / count = " << r_sc_count.read() << std::endl; } #endif } break; } ///////////////// case SC_UPT_LOCK: // try to register the transaction in UPT // and write data in cache if successful registration // releases locks to retry later if UPT full { if ( r_alloc_upt_fsm.read() == ALLOC_UPT_SC ) { bool wok = false; size_t index = 0; size_t srcid = m_cmd_sc_srcid_fifo.read(); size_t trdid = m_cmd_sc_trdid_fifo.read(); size_t pktid = m_cmd_sc_pktid_fifo.read(); addr_t nline = m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; size_t nb_copies = r_sc_count.read(); wok = m_update_tab.set(true, // it's an update transaction false, // it's not a broadcast true, // it needs a response srcid, trdid, pktid, nline, nb_copies, index); if (wok) // coherence transaction registered in UPT { // cache update size_t way = r_sc_way.read(); size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; m_cache_data[way][set][word] = r_sc_wdata.read(); if(r_sc_cpt.read()==4) m_cache_data[way][set][word+1] = m_cmd_sc_wdata_fifo.read(); // monitor if ( m_monitor_ok ) { vci_addr_t address = m_cmd_sc_addr_fifo.read(); 
char buf[80]; snprintf(buf, 80, "SC_DIR_HIT_WRITE srcid %d", m_cmd_sc_srcid_fifo.read()); check_monitor( buf, address, r_sc_wdata.read() ); if ( r_sc_cpt.read()==4 ) check_monitor( buf, address+4, m_cmd_sc_wdata_fifo.read() ); } r_sc_upt_index = index; r_sc_fsm = SC_UPT_HEAP_LOCK; } else // releases the locks protecting UPT and DIR UPT full { r_sc_fsm = SC_WAIT; } #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Register multi-update transaction in UPT" << " / wok = " << wok << " / nline = " << std::hex << nline << " / count = " << nb_copies << std::endl; } #endif } break; } ///////////// case SC_WAIT: // release all locks and retry from beginning { #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Release all locks" << std::endl; } #endif r_sc_fsm = SC_DIR_LOCK; break; } ////////////////// case SC_UPT_HEAP_LOCK: // lock the heap { if( r_alloc_heap_fsm.read() == ALLOC_HEAP_SC ) { #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Get access to the heap" << std::endl; } #endif r_sc_fsm = SC_UPT_REQ; } break; } //////////////// case SC_UPT_REQ: // send a first update request to INIT_CMD FSM { assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_SC) and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); if( !r_sc_to_init_cmd_multi_req.read() && !r_sc_to_init_cmd_brdcast_req.read() ) { r_sc_to_init_cmd_brdcast_req = false; r_sc_to_init_cmd_trdid = r_sc_upt_index.read(); r_sc_to_init_cmd_nline = m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; r_sc_to_init_cmd_index = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; r_sc_to_init_cmd_wdata = r_sc_wdata.read(); if(r_sc_cpt.read() == 4) { r_sc_to_init_cmd_is_long = true; r_sc_to_init_cmd_wdata_high = m_cmd_sc_wdata_fifo.read(); } else { r_sc_to_init_cmd_is_long = false; r_sc_to_init_cmd_wdata_high = 0; } // We put the first copy in the fifo sc_to_init_cmd_fifo_put = true; sc_to_init_cmd_fifo_inst = r_sc_copy_inst.read(); sc_to_init_cmd_fifo_srcid = r_sc_copy.read(); #if L1_MULTI_CACHE sc_to_init_cmd_fifo_cache_id= 
r_sc_copy_cache.read(); #endif if(r_sc_count.read() == 1) // one single copy { r_sc_fsm = SC_IDLE; // Response will be sent after receiving // update responses cmd_sc_fifo_get = true; r_sc_to_init_cmd_multi_req = true; r_sc_cpt = 0; } else // several copies { r_sc_fsm = SC_UPT_NEXT; } #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Send the first update request to INIT_CMD FSM " << " / address = " << std::hex << m_cmd_sc_addr_fifo.read() << " / wdata = " << std::hex << r_sc_wdata.read() << " / srcid = " << std::dec << r_sc_copy.read() << " / inst = " << std::dec << r_sc_copy_inst.read() << std::endl; } #endif } break; } ///////////////// case SC_UPT_NEXT: // send a multi-update request to INIT_CMD FSM { assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_SC) and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); HeapEntry entry = m_heap.read(r_sc_ptr.read()); sc_to_init_cmd_fifo_srcid = entry.owner.srcid; #if L1_MULTI_CACHE sc_to_init_cmd_fifo_cache_id = entry.owner.cache_id; #endif sc_to_init_cmd_fifo_inst = entry.owner.inst; sc_to_init_cmd_fifo_put = true; if( m_sc_to_init_cmd_inst_fifo.wok() ) // request accepted by INIT_CMD FSM { r_sc_ptr = entry.next; if( entry.next == r_sc_ptr.read() ) // last copy { r_sc_to_init_cmd_multi_req = true; r_sc_fsm = SC_IDLE; // Response will be sent after receiving // all update responses cmd_sc_fifo_get = true; r_sc_cpt = 0; } } #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Send the next update request to INIT_CMD FSM " << " / address = " << std::hex << m_cmd_sc_addr_fifo.read() << " / wdata = " << std::hex << r_sc_wdata.read() << " / srcid = " << std::dec << entry.owner.srcid << " / inst = " << std::dec << entry.owner.inst << std::endl; } #endif break; } ///////////////////// case SC_BC_TRT_LOCK: // check the TRT to register a PUT transaction { if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) { if( !r_sc_to_ixr_cmd_req ) // we can transfer the request to IXR_CMD FSM { // fill the data buffer size_t way = r_sc_way.read(); 
size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; for(size_t i = 0; i Register a broadcast inval transaction in UPT" << " / nline = " << nline << " / count = " << nb_copies << " / upt_index = " << index << std::endl; } #endif } else // releases the lock protecting UPT { r_sc_fsm = SC_WAIT; } } break; } ////////////////// case SC_BC_DIR_INVAL: // Register the PUT transaction in TRT, and inval the DIR entry { if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) && (r_alloc_upt_fsm.read() == ALLOC_UPT_SC ) && (r_alloc_dir_fsm.read() == ALLOC_DIR_SC )) { // set TRT m_transaction_tab.set(r_sc_trt_index.read(), false, // PUT request to XRAM m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())], 0, 0, 0, false, // not a processor read 0, 0, std::vector(m_words,0), std::vector(m_words,0)); // invalidate directory entry DirectoryEntry entry; entry.valid = false; entry.dirty = false; entry.tag = 0; entry.is_cnt = false; entry.lock = false; entry.count = 0; entry.owner.srcid = 0; #if L1_MULTI_CACHE entry.owner.cache_id= 0; #endif entry.owner.inst = false; entry.ptr = 0; size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; size_t way = r_sc_way.read(); m_cache_directory.write(set, way, entry); r_sc_fsm = SC_BC_CC_SEND; #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Register the PUT in TRT and invalidate DIR entry" << " / nline = " << std::hex << m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())] << " / set = " << std::dec << set << " / way = " << way << std::endl; } #endif } else { assert(false and "LOCK ERROR in SC_FSM, STATE = SC_BC_DIR_INVAL"); } break; } /////////////////// case SC_BC_CC_SEND: // Request the broadcast inval to INIT_CMD FSM { if ( !r_sc_to_init_cmd_multi_req.read() && !r_sc_to_init_cmd_brdcast_req.read()) { r_sc_to_init_cmd_multi_req = false; r_sc_to_init_cmd_brdcast_req = true; r_sc_to_init_cmd_trdid = r_sc_upt_index.read(); r_sc_to_init_cmd_nline = 
m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; r_sc_to_init_cmd_index = 0; r_sc_to_init_cmd_wdata = 0; r_sc_fsm = SC_BC_XRAM_REQ; } break; } //////////////////// case SC_BC_XRAM_REQ: // request the IXR FSM to start a put transaction { if ( !r_sc_to_ixr_cmd_req ) { r_sc_to_ixr_cmd_req = true; r_sc_to_ixr_cmd_write = true; r_sc_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; r_sc_to_ixr_cmd_trdid = r_sc_trt_index.read(); r_sc_fsm = SC_IDLE; cmd_sc_fifo_get = true; r_sc_cpt = 0; #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Request a PUT transaction to IXR_CMD FSM" << std::hex << " / nline = " << m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()] << " / trt_index = " << r_sc_trt_index.read() << std::endl; } #endif } else { std::cout << "MEM_CACHE, SC_BC_XRAM_REQ state : request should not have been previously set" << std::endl; } break; } ///////////////// case SC_RSP_FAIL: // request TGT_RSP FSM to send a failure response { if( !r_sc_to_tgt_rsp_req ) { cmd_sc_fifo_get = true; r_sc_cpt = 0; r_sc_to_tgt_rsp_req = true; r_sc_to_tgt_rsp_data = 1; r_sc_to_tgt_rsp_srcid = m_cmd_sc_srcid_fifo.read(); r_sc_to_tgt_rsp_trdid = m_cmd_sc_trdid_fifo.read(); r_sc_to_tgt_rsp_pktid = m_cmd_sc_pktid_fifo.read(); r_sc_fsm = SC_IDLE; #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Request TGT_RSP to send a failure response" << std::endl; } #endif } break; } //////////////////// case SC_RSP_SUCCESS: // request TGT_RSP FSM to send a success response { if( !r_sc_to_tgt_rsp_req ) { cmd_sc_fifo_get = true; r_sc_cpt = 0; r_sc_to_tgt_rsp_req = true; r_sc_to_tgt_rsp_data = 0; r_sc_to_tgt_rsp_srcid = m_cmd_sc_srcid_fifo.read(); r_sc_to_tgt_rsp_trdid = m_cmd_sc_trdid_fifo.read(); r_sc_to_tgt_rsp_pktid = m_cmd_sc_pktid_fifo.read(); r_sc_fsm = SC_IDLE; #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Request TGT_RSP to send a success response" << std::endl; } #endif } break; } ///////////////////// case SC_MISS_TRT_LOCK: // cache miss : request access 
to transaction Table { if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) { size_t index = 0; bool hit_read = m_transaction_tab.hit_read( m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()],index); bool hit_write = m_transaction_tab.hit_write( m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()]); bool wok = !m_transaction_tab.full(index); #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Check TRT state" << " / hit_read = " << hit_read << " / hit_write = " << hit_write << " / wok = " << wok << " / index = " << index << std::endl; } #endif if ( hit_read || !wok || hit_write ) // missing line already requested or no space in TRT { r_sc_fsm = SC_WAIT; } else { r_sc_trt_index = index; r_sc_fsm = SC_MISS_TRT_SET; } } break; } //////////////////// case SC_MISS_TRT_SET: // register the GET transaction in TRT { if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) { std::vector be_vector; std::vector data_vector; be_vector.clear(); data_vector.clear(); for ( size_t i=0; i Register a GET transaction in TRT" << std::hex << " / nline = " << m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()] << " / trt_index = " << r_sc_trt_index.read() << std::endl; } #endif } break; } ////////////////////// case SC_MISS_XRAM_REQ: // request the IXR_CMD FSM to fetch the missing line { if ( !r_sc_to_ixr_cmd_req ) { r_sc_to_ixr_cmd_req = true; r_sc_to_ixr_cmd_write = false; r_sc_to_ixr_cmd_trdid = r_sc_trt_index.read(); r_sc_to_ixr_cmd_nline = m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()]; r_sc_fsm = SC_WAIT; #if DEBUG_MEMC_SC if( m_debug_sc_fsm ) { std::cout << " Request a GET transaction to IXR_CMD FSM" << std::hex << " / nline = " << m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()] << " / trt_index = " << r_sc_trt_index.read() << std::endl; } #endif } break; } } // end switch r_sc_fsm ////////////////////////////////////////////////////////////////////////////// // INIT_CMD FSM ////////////////////////////////////////////////////////////////////////////// // The INIT_CMD fsm controls the VCI CMD initiator port on 
the coherence // network, used to update or invalidate cache lines in L1 caches. // // It implements a round-robin priority between the three possible client FSMs // XRAM_RSP, WRITE and SC. Each FSM can request two types of services: // - r_xram_rsp_to_init_cmd_multi_req : multi-inval // r_xram_rsp_to_init_cmd_brdcast_req : broadcast-inval // - r_write_to_init_cmd_multi_req : multi-update // r_write_to_init_cmd_brdcast_req : broadcast-inval // - r_sc_to_init_cmd_multi_req : multi-update // r_sc_to_init_cmd_brdcast_req : broadcast-inval // // An inval request is a single cell VCI write command containing the // index of the line to be invalidated. // An update request is a multi-cells VCI write command : The first cell // contains the index of the cache line to be updated. The second cell contains // the index of the first modified word in the line. The following cells // contain the data. /////////////////////////////////////////////////////////////////////////////// switch ( r_init_cmd_fsm.read() ) { //////////////////////// case INIT_CMD_UPDT_IDLE: // XRAM_RSP FSM has highest priority { if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || r_xram_rsp_to_init_cmd_multi_req.read() ) { r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; m_cpt_inval++; } else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) { r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; m_cpt_inval++; } else if ( m_write_to_init_cmd_inst_fifo.rok() || r_write_to_init_cmd_multi_req.read() ) { r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; m_cpt_update++; } else if ( r_write_to_init_cmd_brdcast_req.read() ) { r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; m_cpt_inval++; } else if ( m_sc_to_init_cmd_inst_fifo.rok() || r_sc_to_init_cmd_multi_req.read() ) { r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; m_cpt_update++; } else if( r_sc_to_init_cmd_brdcast_req.read() ) { r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; m_cpt_inval++; } break; } ///////////////////////// case INIT_CMD_INVAL_IDLE: // WRITE FSM has highest priority { if ( 
m_write_to_init_cmd_inst_fifo.rok() || r_write_to_init_cmd_multi_req.read() ) { r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; m_cpt_update++; } else if ( r_write_to_init_cmd_brdcast_req.read() ) { r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; m_cpt_inval++; } else if ( m_sc_to_init_cmd_inst_fifo.rok() || r_sc_to_init_cmd_multi_req.read() ) { r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; m_cpt_update++; } else if( r_sc_to_init_cmd_brdcast_req.read() ) { r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; m_cpt_inval++; } else if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || r_xram_rsp_to_init_cmd_multi_req.read() ) { r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; m_cpt_inval++; } else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) { r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; m_cpt_inval++; } break; } ////////////////////////// case INIT_CMD_SC_UPDT_IDLE: // SC FSM has highest priority { if ( m_sc_to_init_cmd_inst_fifo.rok() || r_sc_to_init_cmd_multi_req.read() ) { r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; m_cpt_update++; } else if( r_sc_to_init_cmd_brdcast_req.read() ) { r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; m_cpt_inval++; } else if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || r_xram_rsp_to_init_cmd_multi_req.read() ) { r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; m_cpt_inval++; } else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) { r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; m_cpt_inval++; } else if ( m_write_to_init_cmd_inst_fifo.rok() || r_write_to_init_cmd_multi_req.read() ) { r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; m_cpt_update++; } else if ( r_write_to_init_cmd_brdcast_req.read() ) { r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; m_cpt_inval++; } break; } ////////////////////////// case INIT_CMD_INVAL_NLINE: // send a multi-inval (from XRAM_RSP) { if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() ) { if ( p_vci_ini.cmdack ) { m_cpt_inval_mult++; r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; xram_rsp_to_init_cmd_fifo_get = true; } } else { if( r_xram_rsp_to_init_cmd_multi_req.read() ) r_xram_rsp_to_init_cmd_multi_req = false; 
r_init_cmd_fsm = INIT_CMD_INVAL_IDLE; } break; } /////////////////////////// case INIT_CMD_XRAM_BRDCAST: // send a broadcast-inval (from XRAM_RSP) { if ( p_vci_ini.cmdack ) { m_cpt_inval_brdcast++; r_init_cmd_fsm = INIT_CMD_INVAL_IDLE; r_xram_rsp_to_init_cmd_brdcast_req = false; } break; } //////////////////////////// case INIT_CMD_WRITE_BRDCAST: // send a broadcast-inval (from WRITE FSM) { if( p_vci_ini.cmdack ) { #if DEBUG_MEMC_INIT_CMD if( m_debug_init_cmd_fsm ) { std::cout << " Broadcast-Inval for line " << r_write_to_init_cmd_nline.read() << std::endl; } #endif m_cpt_inval_brdcast++; r_write_to_init_cmd_brdcast_req = false; r_init_cmd_fsm = INIT_CMD_UPDT_IDLE; } break; } ///////////////////////// case INIT_CMD_UPDT_NLINE: // send nline for a multi-update (from WRITE FSM) { if ( m_write_to_init_cmd_inst_fifo.rok() ) { if ( p_vci_ini.cmdack ) { m_cpt_update_mult++; r_init_cmd_fsm = INIT_CMD_UPDT_INDEX; // write_to_init_cmd_fifo_get = true; } } else { if ( r_write_to_init_cmd_multi_req.read() ) r_write_to_init_cmd_multi_req = false; r_init_cmd_fsm = INIT_CMD_UPDT_IDLE; } break; } ///////////////////////// case INIT_CMD_UPDT_INDEX: // send word index for a multi-update (from WRITE FSM) { r_init_cmd_cpt = 0; if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_UPDT_DATA; break; } //////////////////////// case INIT_CMD_UPDT_DATA: // send the data for a multi-update (from WRITE FSM) { if ( p_vci_ini.cmdack ) { if ( r_init_cmd_cpt.read() == (r_write_to_init_cmd_count.read()-1) ) { r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; write_to_init_cmd_fifo_get = true; } else { r_init_cmd_cpt = r_init_cmd_cpt.read() + 1; } } break; } ///////////////////////// case INIT_CMD_SC_BRDCAST: // send a broadcast-inval (from SC FSM) { if( p_vci_ini.cmdack ) { m_cpt_inval_brdcast++; r_sc_to_init_cmd_brdcast_req = false; r_init_cmd_fsm = INIT_CMD_SC_UPDT_IDLE; } break; } //////////////////////////// case INIT_CMD_SC_UPDT_NLINE: // send nline for a multi-update (from SC FSM) { if ( 
m_sc_to_init_cmd_inst_fifo.rok() ) { if ( p_vci_ini.cmdack ) { m_cpt_update_mult++; r_init_cmd_fsm = INIT_CMD_SC_UPDT_INDEX; } } else { if( r_sc_to_init_cmd_multi_req.read() ) r_sc_to_init_cmd_multi_req = false; r_init_cmd_fsm = INIT_CMD_SC_UPDT_IDLE; } break; } //////////////////////////// case INIT_CMD_SC_UPDT_INDEX: // send word index for a multi-update (from SC FSM) { if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_SC_UPDT_DATA; break; } /////////////////////////// case INIT_CMD_SC_UPDT_DATA: // send first data for a multi-update (from SC FSM) { if ( p_vci_ini.cmdack ) { if ( r_sc_to_init_cmd_is_long.read() ) { r_init_cmd_fsm = INIT_CMD_SC_UPDT_DATA_HIGH; } else { sc_to_init_cmd_fifo_get = true; r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; } } break; } //////////////////////// case INIT_CMD_SC_UPDT_DATA_HIGH: // send second data for a multi-update (from SC FSM) { if ( p_vci_ini.cmdack ) { sc_to_init_cmd_fifo_get = true; r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; } break; } } // end switch r_init_cmd_fsm ///////////////////////////////////////////////////////////////////// // TGT_RSP FSM ///////////////////////////////////////////////////////////////////// // The TGT_RSP fsm sends the responses on the VCI target port // with a round robin priority between six requests : // - r_read_to_tgt_rsp_req // - r_write_to_tgt_rsp_req // - r_sc_to_tgt_rsp_req // - r_cleanup_to_tgt_rsp_req // - r_xram_rsp_to_tgt_rsp_req // - r_init_rsp_to_tgt_rsp_req // The ordering is : read > write > sc > xram > init > cleanup ///////////////////////////////////////////////////////////////////// switch ( r_tgt_rsp_fsm.read() ) { /////////////////////// case TGT_RSP_READ_IDLE: // write requests have the highest priority { if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; else if ( r_xram_rsp_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_XRAM; r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); } else if ( 
r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; else if ( r_read_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_READ; r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); } break; } //////////////////////// case TGT_RSP_WRITE_IDLE: // sc requests have the highest priority { if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; else if ( r_xram_rsp_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_XRAM; r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); } else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; else if ( r_read_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_READ; r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); } else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; break; } /////////////////////// case TGT_RSP_SC_IDLE: // xram_rsp requests have the highest priority { if ( r_xram_rsp_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_XRAM; r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); } else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; else if ( r_read_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_READ; r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); } else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; break; } /////////////////////// case TGT_RSP_XRAM_IDLE: // init requests have the highest priority { if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; else if ( r_read_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_READ; r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); } else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; else if ( r_xram_rsp_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_XRAM; 
r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); } break; } /////////////////////// case TGT_RSP_INIT_IDLE: // cleanup requests have the highest priority { if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; else if ( r_read_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_READ; r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); } else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; else if ( r_xram_rsp_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_XRAM; r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); } else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; break; } /////////////////////// case TGT_RSP_CLEANUP_IDLE: // read requests have the highest priority { if ( r_read_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_READ; r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); } else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; else if ( r_xram_rsp_to_tgt_rsp_req ) { r_tgt_rsp_fsm = TGT_RSP_XRAM; r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); } else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; break; } ////////////////// case TGT_RSP_READ: // send the response to a read { if ( p_vci_tgt.rspack ) { #if DEBUG_MEMC_TGT_RSP if( m_debug_tgt_rsp_fsm ) { std::cout << " Read response" << " / rsrcid = " << std::dec << r_read_to_tgt_rsp_srcid.read() << " / rtrdid = " << r_read_to_tgt_rsp_trdid.read() << " / rdata = " << std::hex << r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; } #endif if ( r_tgt_rsp_cpt.read() == (r_read_to_tgt_rsp_word.read()+r_read_to_tgt_rsp_length-1) ) { r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; r_read_to_tgt_rsp_req = false; } else { r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; } } break; } /////////////////// case TGT_RSP_WRITE: // send the write 
acknowledge { if ( p_vci_tgt.rspack ) { #if DEBUG_MEMC_TGT_RSP if( m_debug_tgt_rsp_fsm ) { std::cout << " Write response" << " / rsrcid = " << std::dec << r_write_to_tgt_rsp_srcid.read() << " / rtrdid = " << r_write_to_tgt_rsp_trdid.read() << std::endl; } #endif r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE; r_write_to_tgt_rsp_req = false; } break; } /////////////////// case TGT_RSP_CLEANUP: // pas clair pour moi (AG) { if ( p_vci_tgt.rspack ) { #if DEBUG_MEMC_TGT_RSP if( m_debug_tgt_rsp_fsm ) { std::cout << " Cleanup response" << " / rsrcid = " << std::dec << r_cleanup_to_tgt_rsp_srcid.read() << " / rtrdid = " << r_cleanup_to_tgt_rsp_trdid.read() << std::endl; } #endif r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE; r_cleanup_to_tgt_rsp_req = false; } break; } ////////////////// case TGT_RSP_SC: // send one atomic word response { if ( p_vci_tgt.rspack ) { #if DEBUG_MEMC_TGT_RSP if( m_debug_tgt_rsp_fsm ) { std::cout << " SC response" << " / rsrcid = " << std::dec << r_sc_to_tgt_rsp_srcid.read() << " / rtrdid = " << r_sc_to_tgt_rsp_trdid.read() << std::endl; } #endif r_tgt_rsp_fsm = TGT_RSP_SC_IDLE; r_sc_to_tgt_rsp_req = false; } break; } /////////////////////// case TGT_RSP_XRAM: // send the response after XRAM access { if ( p_vci_tgt.rspack ) { #if DEBUG_MEMC_TGT_RSP if( m_debug_tgt_rsp_fsm ) { std::cout << " Response following XRAM access" << " / rsrcid = " << std::dec << r_xram_rsp_to_tgt_rsp_srcid.read() << " / rtrdid = " << r_xram_rsp_to_tgt_rsp_trdid.read() << " / rdata = " << std::hex << r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; } #endif if ( (r_tgt_rsp_cpt.read() == (r_xram_rsp_to_tgt_rsp_word.read()+r_xram_rsp_to_tgt_rsp_length.read()-1)) || r_xram_rsp_to_tgt_rsp_rerror.read() ) { r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; r_xram_rsp_to_tgt_rsp_req = false; } else { r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; } } break; } ////////////////// case TGT_RSP_INIT: // send the write response after coherence 
transaction { if ( p_vci_tgt.rspack ) { #if DEBUG_MEMC_TGT_RSP if( m_debug_tgt_rsp_fsm ) { std::cout << " Write response after coherence transaction" << " / rsrcid = " << std::dec << r_init_rsp_to_tgt_rsp_srcid.read() << " / rtrdid = " << r_init_rsp_to_tgt_rsp_trdid.read() << std::endl; } #endif r_tgt_rsp_fsm = TGT_RSP_INIT_IDLE; r_init_rsp_to_tgt_rsp_req = false; } break; } } // end switch tgt_rsp_fsm //////////////////////////////////////////////////////////////////////////////////// // ALLOC_UPT FSM //////////////////////////////////////////////////////////////////////////////////// // The ALLOC_UPT FSM allocates the access to the Update/Inval Table (UPT). // with a round robin priority between three FSMs : INIT_RSP > WRITE > XRAM_RSP > CLEANUP // - The WRITE FSM initiates update transactions and sets new entry in UPT. // - The XRAM_RSP FSM initiates inval transactions and sets new entry in UPT. // - The INIT_RSP FSM complete those trasactions and erase the UPT entry. // - The CLEANUP FSM decrement an entry in UPT. // The resource is always allocated. 
///////////////////////////////////////////////////////////////////////////////////// switch ( r_alloc_upt_fsm.read() ) { //////////////////////// case ALLOC_UPT_INIT_RSP: if ( (r_init_rsp_fsm.read() != INIT_RSP_UPT_LOCK) && (r_init_rsp_fsm.read() != INIT_RSP_UPT_CLEAR) ) { if ((r_write_fsm.read() == WRITE_UPT_LOCK) || (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_WRITE; else if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; else if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; else if ((r_sc_fsm.read() == SC_UPT_LOCK) || (r_sc_fsm.read() == SC_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC; } break; ///////////////////// case ALLOC_UPT_WRITE: if ( (r_write_fsm.read() != WRITE_UPT_LOCK) && (r_write_fsm.read() != WRITE_BC_UPT_LOCK)) { if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; else if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; else if ((r_sc_fsm.read() == SC_UPT_LOCK) || (r_sc_fsm.read() == SC_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC; else if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; } break; //////////////////////// case ALLOC_UPT_XRAM_RSP: if (r_xram_rsp_fsm.read() != XRAM_RSP_INVAL_LOCK) { if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; else if ((r_sc_fsm.read() == SC_UPT_LOCK) || (r_sc_fsm.read() == SC_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC; else if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; else if ((r_write_fsm.read() == WRITE_UPT_LOCK) || (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_WRITE; } break; ////////////////////////// case ALLOC_UPT_CLEANUP: if(r_cleanup_fsm.read() != CLEANUP_UPT_LOCK ) { if ((r_sc_fsm.read() == SC_UPT_LOCK) || (r_sc_fsm.read() == SC_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC; else if (r_init_rsp_fsm.read() == 
INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; else if ((r_write_fsm.read() == WRITE_UPT_LOCK) || (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_WRITE; else if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; } break; ////////////////////////// case ALLOC_UPT_SC: if( (r_sc_fsm.read() != SC_UPT_LOCK) && (r_sc_fsm.read() != SC_BC_UPT_LOCK)) { if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; else if ((r_write_fsm.read() == WRITE_UPT_LOCK) || (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_WRITE; else if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; else if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; } break; } // end switch r_alloc_upt_fsm //////////////////////////////////////////////////////////////////////////////////// // ALLOC_DIR FSM //////////////////////////////////////////////////////////////////////////////////// // The ALLOC_DIR FSM allocates the access to the directory and // the data cache with a round robin priority between 5 user FSMs : // The cyclic ordering is READ > WRITE > SC > CLEANUP > XRAM_RSP // The ressource is always allocated. 
///////////////////////////////////////////////////////////////////////////////////// switch ( r_alloc_dir_fsm.read() ) { //////////////////// case ALLOC_DIR_READ: if ( ( (r_read_fsm.read() != READ_DIR_LOCK) && (r_read_fsm.read() != READ_TRT_LOCK) && (r_read_fsm.read() != READ_HEAP_LOCK)) || ( (r_read_fsm.read() == READ_HEAP_LOCK) && (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) ) || ( (r_read_fsm.read() == READ_TRT_LOCK) && (r_alloc_trt_fsm.read() == ALLOC_TRT_READ) ) ) { if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; else if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; else if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; } break; ///////////////////// case ALLOC_DIR_WRITE: if ( ((r_write_fsm.read() != WRITE_DIR_LOCK) && (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) && (r_write_fsm.read() != WRITE_DIR_READ) && (r_write_fsm.read() != WRITE_DIR_HIT) && (r_write_fsm.read() != WRITE_BC_TRT_LOCK) && (r_write_fsm.read() != WRITE_BC_UPT_LOCK) && (r_write_fsm.read() != WRITE_UPT_LOCK) && (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK)) || ( (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) && (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) ) || ( (r_write_fsm.read() == WRITE_MISS_TRT_LOCK) && (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) ) ) { if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; else if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; else if (r_read_fsm.read() == READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; } break; //////////////////// case ALLOC_DIR_SC: if ( ((r_sc_fsm.read() != SC_DIR_LOCK) && (r_sc_fsm.read() != SC_DIR_HIT_READ ) && (r_sc_fsm.read() != SC_DIR_HIT_WRITE ) && // (r_sc_fsm.read() != SC_MISS_TRT_LOCK ) && (r_sc_fsm.read() != SC_BC_TRT_LOCK) && 
(r_sc_fsm.read() != SC_BC_UPT_LOCK) && (r_sc_fsm.read() != SC_UPT_LOCK) && (r_sc_fsm.read() != SC_UPT_HEAP_LOCK)) || ( (r_sc_fsm.read() == SC_UPT_HEAP_LOCK) && (r_alloc_heap_fsm.read() == ALLOC_HEAP_SC) ) || ( (r_sc_fsm.read() == SC_MISS_TRT_LOCK ) && (r_alloc_trt_fsm.read() == ALLOC_TRT_SC) ) ) { if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; else if (r_read_fsm.read() == READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; else if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; } break; /////////////////////// case ALLOC_DIR_CLEANUP: if ( (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) && (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) ) { if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; else if (r_read_fsm.read() == READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; else if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; else if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; } break; //////////////////////// case ALLOC_DIR_XRAM_RSP: if ( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) && (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) && (r_xram_rsp_fsm.read() != XRAM_RSP_INVAL_LOCK)) { if (r_read_fsm.read() == READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; else if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; else if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; else if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; } break; } // end switch alloc_dir_fsm //////////////////////////////////////////////////////////////////////////////////// // ALLOC_TRT FSM //////////////////////////////////////////////////////////////////////////////////// // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) // with a round robin priority between 4 user FSMs : 
// The cyclic priority is READ > WRITE > SC > XRAM_RSP > IXR_RSP
// The resource is always allocated.
///////////////////////////////////////////////////////////////////////////////////
// NOTE(review): unlike the other allocators, this switch reads the register
// without .read() and relies on its implicit conversion - confirm intended.
switch (r_alloc_trt_fsm)
{
    ////////////////////
    case ALLOC_TRT_READ:
        // READ holds the TRT only while in its single TRT-lock state.
        if ( r_read_fsm.read() != READ_TRT_LOCK )
        {
            if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) ||
                (r_write_fsm.read() == WRITE_BC_TRT_LOCK))          r_alloc_trt_fsm = ALLOC_TRT_WRITE;
            else if ((r_sc_fsm.read() == SC_MISS_TRT_LOCK) ||
                     (r_sc_fsm.read() == SC_BC_TRT_LOCK))           r_alloc_trt_fsm = ALLOC_TRT_SC;
            else if (r_xram_rsp_fsm.read() == XRAM_RSP_TRT_COPY)    r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
            else if ( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ||
                      (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) )  r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;
        }
        break;
    /////////////////////
    case ALLOC_TRT_WRITE:
        if ( (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) &&
             (r_write_fsm.read() != WRITE_BC_TRT_LOCK) &&
             (r_write_fsm.read() != WRITE_BC_UPT_LOCK))
        {
            if ((r_sc_fsm.read() == SC_MISS_TRT_LOCK) ||
                (r_sc_fsm.read() == SC_BC_TRT_LOCK))                r_alloc_trt_fsm = ALLOC_TRT_SC;
            else if (r_xram_rsp_fsm.read() == XRAM_RSP_TRT_COPY)    r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
            else if ( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ||
                      (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ))   r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;
            else if (r_read_fsm.read() == READ_TRT_LOCK)            r_alloc_trt_fsm = ALLOC_TRT_READ;
        }
        break;
    ////////////////////
    case ALLOC_TRT_SC:
        if ( (r_sc_fsm.read() != SC_MISS_TRT_LOCK) &&
             (r_sc_fsm.read() != SC_BC_TRT_LOCK) &&
             (r_sc_fsm.read() != SC_BC_UPT_LOCK))
        {
            if (r_xram_rsp_fsm.read() == XRAM_RSP_TRT_COPY)         r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
            else if ( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ||
                      (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ))   r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;
            else if (r_read_fsm.read() == READ_TRT_LOCK)            r_alloc_trt_fsm = ALLOC_TRT_READ;
            else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) ||
                     (r_write_fsm.read() == WRITE_BC_TRT_LOCK))     r_alloc_trt_fsm = ALLOC_TRT_WRITE;
        }
        break;
    ////////////////////////
    case ALLOC_TRT_XRAM_RSP:
        if ( (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) &&
             (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) &&
             (r_xram_rsp_fsm.read() != XRAM_RSP_INVAL_LOCK))
        {
            if ( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ||
                 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ))        r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;
            else if (r_read_fsm.read() == READ_TRT_LOCK)            r_alloc_trt_fsm = ALLOC_TRT_READ;
            else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) ||
                     (r_write_fsm.read() == WRITE_BC_TRT_LOCK))     r_alloc_trt_fsm = ALLOC_TRT_WRITE;
            else if ((r_sc_fsm.read() == SC_MISS_TRT_LOCK) ||
                     (r_sc_fsm.read() == SC_BC_TRT_LOCK))           r_alloc_trt_fsm = ALLOC_TRT_SC;
        }
        break;
    ////////////////////////
    case ALLOC_TRT_IXR_RSP:
        if ( (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) &&
             (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ) )
        {
            if (r_read_fsm.read() == READ_TRT_LOCK)                 r_alloc_trt_fsm = ALLOC_TRT_READ;
            else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) ||
                     (r_write_fsm.read() == WRITE_BC_TRT_LOCK))     r_alloc_trt_fsm = ALLOC_TRT_WRITE;
            else if ((r_sc_fsm.read() == SC_MISS_TRT_LOCK) ||
                     (r_sc_fsm.read() == SC_BC_TRT_LOCK))           r_alloc_trt_fsm = ALLOC_TRT_SC;
            else if (r_xram_rsp_fsm.read() == XRAM_RSP_TRT_COPY)    r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
        }
        break;
} // end switch alloc_trt_fsm

////////////////////////////////////////////////////////////////////////////////////
//    ALLOC_HEAP FSM
////////////////////////////////////////////////////////////////////////////////////
// The ALLOC_HEAP FSM allocates the access to the heap
// with a round robin priority between 5 user FSMs :
// The cyclic ordering is READ > WRITE > SC > CLEANUP > XRAM_RSP
// The resource is always allocated.
/////////////////////////////////////////////////////////////////////////////////////
// Round-robin allocator for the heap (copies list), shared by the READ, WRITE,
// SC, CLEANUP and XRAM_RSP FSMs.  Same release/grant discipline as the other
// allocators: the owner keeps the heap across all of its heap-using states.
switch ( r_alloc_heap_fsm.read() )
{
    ////////////////////
    case ALLOC_HEAP_READ:
        if ( (r_read_fsm.read() != READ_HEAP_LOCK) &&
             (r_read_fsm.read() != READ_HEAP_ERASE) )
        {
            if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK)        r_alloc_heap_fsm = ALLOC_HEAP_WRITE;
            else if (r_sc_fsm.read() == SC_UPT_HEAP_LOCK)         r_alloc_heap_fsm = ALLOC_HEAP_SC;
            else if (r_cleanup_fsm.read() == CLEANUP_HEAP_LOCK)   r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP;
            else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_ERASE) r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP;
        }
        break;
    /////////////////////
    case ALLOC_HEAP_WRITE:
        if ( (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) &&
             (r_write_fsm.read() != WRITE_UPT_REQ) &&
             (r_write_fsm.read() != WRITE_UPT_NEXT) )
        {
            if (r_sc_fsm.read() == SC_UPT_HEAP_LOCK)              r_alloc_heap_fsm = ALLOC_HEAP_SC;
            else if (r_cleanup_fsm.read() == CLEANUP_HEAP_LOCK)   r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP;
            else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_ERASE) r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP;
            else if (r_read_fsm.read() == READ_HEAP_LOCK)         r_alloc_heap_fsm = ALLOC_HEAP_READ;
        }
        break;
    ////////////////////
    case ALLOC_HEAP_SC:
        if ( (r_sc_fsm.read() != SC_UPT_HEAP_LOCK) &&
             (r_sc_fsm.read() != SC_UPT_REQ ) &&
             (r_sc_fsm.read() != SC_UPT_NEXT) )
        {
            if (r_cleanup_fsm.read() == CLEANUP_HEAP_LOCK)        r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP;
            else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_ERASE) r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP;
            else if (r_read_fsm.read() == READ_HEAP_LOCK)         r_alloc_heap_fsm = ALLOC_HEAP_READ;
            else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK)   r_alloc_heap_fsm = ALLOC_HEAP_WRITE;
        }
        break;
    ///////////////////////
    case ALLOC_HEAP_CLEANUP:
        if ( (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) &&
             (r_cleanup_fsm.read() != CLEANUP_HEAP_SEARCH)&&
             (r_cleanup_fsm.read() != CLEANUP_HEAP_CLEAN) )
        {
            if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_ERASE)     r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP;
            else if (r_read_fsm.read() == READ_HEAP_LOCK)         r_alloc_heap_fsm = ALLOC_HEAP_READ;
            else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK)   r_alloc_heap_fsm = ALLOC_HEAP_WRITE;
            else if (r_sc_fsm.read() == SC_UPT_HEAP_LOCK)         r_alloc_heap_fsm = ALLOC_HEAP_SC;
        }
        break;
    ////////////////////////
    case ALLOC_HEAP_XRAM_RSP:
        if ( r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_ERASE )
        {
            if (r_read_fsm.read() == READ_HEAP_LOCK)              r_alloc_heap_fsm = ALLOC_HEAP_READ;
            else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK)   r_alloc_heap_fsm = ALLOC_HEAP_WRITE;
            else if (r_sc_fsm.read() == SC_UPT_HEAP_LOCK)         r_alloc_heap_fsm = ALLOC_HEAP_SC;
            else if (r_cleanup_fsm.read() == CLEANUP_HEAP_LOCK)   r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP;
        }
        break;
} // end switch alloc_heap_fsm

////////////////////////////////////////////////////////////////////////////////////
//    TGT_CMD to READ FIFO
////////////////////////////////////////////////////////////////////////////////////
// Registered FIFO update: put/get flags were computed combinationally by the
// TGT_CMD and READ FSMs earlier in transition(); put_and_get is used when both
// a push and a pop happen in the same cycle.
if ( cmd_read_fifo_put ) {
    if ( cmd_read_fifo_get ) {
        m_cmd_read_addr_fifo.put_and_get((addr_t)(p_vci_tgt.address.read()));
        m_cmd_read_length_fifo.put_and_get(p_vci_tgt.plen.read()>>2);   // plen (bytes) -> words
        m_cmd_read_srcid_fifo.put_and_get(p_vci_tgt.srcid.read());
        m_cmd_read_trdid_fifo.put_and_get(p_vci_tgt.trdid.read());
        m_cmd_read_pktid_fifo.put_and_get(p_vci_tgt.pktid.read());
    } else {
        m_cmd_read_addr_fifo.simple_put((addr_t)(p_vci_tgt.address.read()));
        m_cmd_read_length_fifo.simple_put(p_vci_tgt.plen.read()>>2);
        m_cmd_read_srcid_fifo.simple_put(p_vci_tgt.srcid.read());
        m_cmd_read_trdid_fifo.simple_put(p_vci_tgt.trdid.read());
        m_cmd_read_pktid_fifo.simple_put(p_vci_tgt.pktid.read());
    }
} else {
    if ( cmd_read_fifo_get ) {
        m_cmd_read_addr_fifo.simple_get();
        m_cmd_read_length_fifo.simple_get();
        m_cmd_read_srcid_fifo.simple_get();
        m_cmd_read_trdid_fifo.simple_get();
        m_cmd_read_pktid_fifo.simple_get();
    }
}

/////////////////////////////////////////////////////////////////////
//    TGT_CMD to WRITE FIFO
/////////////////////////////////////////////////////////////////////
if ( cmd_write_fifo_put ) {
    if
( cmd_write_fifo_get ) {
        // simultaneous push and pop
        m_cmd_write_addr_fifo.put_and_get((addr_t)(p_vci_tgt.address.read()));
        m_cmd_write_eop_fifo.put_and_get(p_vci_tgt.eop.read());
        m_cmd_write_srcid_fifo.put_and_get(p_vci_tgt.srcid.read());
        m_cmd_write_trdid_fifo.put_and_get(p_vci_tgt.trdid.read());
        m_cmd_write_pktid_fifo.put_and_get(p_vci_tgt.pktid.read());
        m_cmd_write_data_fifo.put_and_get(p_vci_tgt.wdata.read());
        m_cmd_write_be_fifo.put_and_get(p_vci_tgt.be.read());
    } else {
        m_cmd_write_addr_fifo.simple_put((addr_t)(p_vci_tgt.address.read()));
        m_cmd_write_eop_fifo.simple_put(p_vci_tgt.eop.read());
        m_cmd_write_srcid_fifo.simple_put(p_vci_tgt.srcid.read());
        m_cmd_write_trdid_fifo.simple_put(p_vci_tgt.trdid.read());
        m_cmd_write_pktid_fifo.simple_put(p_vci_tgt.pktid.read());
        m_cmd_write_data_fifo.simple_put(p_vci_tgt.wdata.read());
        m_cmd_write_be_fifo.simple_put(p_vci_tgt.be.read());
    }
} else {
    if ( cmd_write_fifo_get ) {
        m_cmd_write_addr_fifo.simple_get();
        m_cmd_write_eop_fifo.simple_get();
        m_cmd_write_srcid_fifo.simple_get();
        m_cmd_write_trdid_fifo.simple_get();
        m_cmd_write_pktid_fifo.simple_get();
        m_cmd_write_data_fifo.simple_get();
        m_cmd_write_be_fifo.simple_get();
    }
}

////////////////////////////////////////////////////////////////////////////////////
//    TGT_CMD to SC FIFO
////////////////////////////////////////////////////////////////////////////////////
if ( cmd_sc_fifo_put ) {
    if ( cmd_sc_fifo_get ) {
        m_cmd_sc_addr_fifo.put_and_get((addr_t)(p_vci_tgt.address.read()));
        m_cmd_sc_eop_fifo.put_and_get(p_vci_tgt.eop.read());
        m_cmd_sc_srcid_fifo.put_and_get(p_vci_tgt.srcid.read());
        m_cmd_sc_trdid_fifo.put_and_get(p_vci_tgt.trdid.read());
        m_cmd_sc_pktid_fifo.put_and_get(p_vci_tgt.pktid.read());
        m_cmd_sc_wdata_fifo.put_and_get(p_vci_tgt.wdata.read());
    } else {
        m_cmd_sc_addr_fifo.simple_put((addr_t)(p_vci_tgt.address.read()));
        m_cmd_sc_eop_fifo.simple_put(p_vci_tgt.eop.read());
        m_cmd_sc_srcid_fifo.simple_put(p_vci_tgt.srcid.read());
        m_cmd_sc_trdid_fifo.simple_put(p_vci_tgt.trdid.read());
        m_cmd_sc_pktid_fifo.simple_put(p_vci_tgt.pktid.read());
        m_cmd_sc_wdata_fifo.simple_put(p_vci_tgt.wdata.read());
    }
} else {
    if ( cmd_sc_fifo_get ) {
        m_cmd_sc_addr_fifo.simple_get();
        m_cmd_sc_eop_fifo.simple_get();
        m_cmd_sc_srcid_fifo.simple_get();
        m_cmd_sc_trdid_fifo.simple_get();
        m_cmd_sc_pktid_fifo.simple_get();
        m_cmd_sc_wdata_fifo.simple_get();
    }
}

////////////////////////////////////////////////////////////////////////////////////
//    WRITE to INIT_CMD FIFO
////////////////////////////////////////////////////////////////////////////////////
// Coherence-update requests (one entry per L1 copy) from WRITE to INIT_CMD.
if ( write_to_init_cmd_fifo_put ) {
    if ( write_to_init_cmd_fifo_get ) {
        m_write_to_init_cmd_inst_fifo.put_and_get(write_to_init_cmd_fifo_inst);
        m_write_to_init_cmd_srcid_fifo.put_and_get(write_to_init_cmd_fifo_srcid);
#if L1_MULTI_CACHE
        m_write_to_init_cmd_cache_id_fifo.put_and_get(write_to_init_cmd_fifo_cache_id);
#endif
    } else {
        m_write_to_init_cmd_inst_fifo.simple_put(write_to_init_cmd_fifo_inst);
        m_write_to_init_cmd_srcid_fifo.simple_put(write_to_init_cmd_fifo_srcid);
#if L1_MULTI_CACHE
        m_write_to_init_cmd_cache_id_fifo.simple_put(write_to_init_cmd_fifo_cache_id);
#endif
    }
} else {
    if ( write_to_init_cmd_fifo_get ) {
        m_write_to_init_cmd_inst_fifo.simple_get();
        m_write_to_init_cmd_srcid_fifo.simple_get();
#if L1_MULTI_CACHE
        m_write_to_init_cmd_cache_id_fifo.simple_get();
#endif
    }
}

////////////////////////////////////////////////////////////////////////////////////
//    XRAM_RSP to INIT_CMD FIFO
////////////////////////////////////////////////////////////////////////////////////
// Invalidation requests (one entry per L1 copy) from XRAM_RSP to INIT_CMD.
if ( xram_rsp_to_init_cmd_fifo_put ) {
    if ( xram_rsp_to_init_cmd_fifo_get ) {
        m_xram_rsp_to_init_cmd_inst_fifo.put_and_get(xram_rsp_to_init_cmd_fifo_inst);
        m_xram_rsp_to_init_cmd_srcid_fifo.put_and_get(xram_rsp_to_init_cmd_fifo_srcid);
#if L1_MULTI_CACHE
        m_xram_rsp_to_init_cmd_cache_id_fifo.put_and_get(xram_rsp_to_init_cmd_fifo_cache_id);
#endif
    } else {
        m_xram_rsp_to_init_cmd_inst_fifo.simple_put(xram_rsp_to_init_cmd_fifo_inst);
m_xram_rsp_to_init_cmd_srcid_fifo.simple_put(xram_rsp_to_init_cmd_fifo_srcid);
#if L1_MULTI_CACHE
        m_xram_rsp_to_init_cmd_cache_id_fifo.simple_put(xram_rsp_to_init_cmd_fifo_cache_id);
#endif
    }
} else {
    if ( xram_rsp_to_init_cmd_fifo_get ) {
        m_xram_rsp_to_init_cmd_inst_fifo.simple_get();
        m_xram_rsp_to_init_cmd_srcid_fifo.simple_get();
#if L1_MULTI_CACHE
        m_xram_rsp_to_init_cmd_cache_id_fifo.simple_get();
#endif
    }
}

////////////////////////////////////////////////////////////////////////////////////
//    SC to INIT_CMD FIFO
////////////////////////////////////////////////////////////////////////////////////
// Coherence-update requests from the SC FSM to INIT_CMD.
if ( sc_to_init_cmd_fifo_put ) {
    if ( sc_to_init_cmd_fifo_get ) {
        m_sc_to_init_cmd_inst_fifo.put_and_get(sc_to_init_cmd_fifo_inst);
        m_sc_to_init_cmd_srcid_fifo.put_and_get(sc_to_init_cmd_fifo_srcid);
#if L1_MULTI_CACHE
        m_sc_to_init_cmd_cache_id_fifo.put_and_get(sc_to_init_cmd_fifo_cache_id);
#endif
    } else {
        m_sc_to_init_cmd_inst_fifo.simple_put(sc_to_init_cmd_fifo_inst);
        m_sc_to_init_cmd_srcid_fifo.simple_put(sc_to_init_cmd_fifo_srcid);
#if L1_MULTI_CACHE
        m_sc_to_init_cmd_cache_id_fifo.simple_put(sc_to_init_cmd_fifo_cache_id);
#endif
    }
} else {
    if ( sc_to_init_cmd_fifo_get ) {
        m_sc_to_init_cmd_inst_fifo.simple_get();
        m_sc_to_init_cmd_srcid_fifo.simple_get();
#if L1_MULTI_CACHE
        m_sc_to_init_cmd_cache_id_fifo.simple_get();
#endif
    }
}

// cycle counter used by the debug / statistics services
m_cpt_cycles++;

} // end transition()

/////////////////////////////
// Moore outputs: drives all output ports as a pure function of the current
// registered state (no register is written here).
tmpl(void)::genMoore()
/////////////////////////////
{
    ////////////////////////////////////////////////////////////
    // Command signals on the p_vci_ixr port (to external RAM)
    ////////////////////////////////////////////////////////////
    // constant fields
    p_vci_ixr.be      = 0xF;
    p_vci_ixr.pktid   = 0;
    p_vci_ixr.srcid   = m_srcid_ixr;
    p_vci_ixr.cons    = false;
    p_vci_ixr.wrap    = false;
    p_vci_ixr.contig  = true;
    p_vci_ixr.clen    = 0;
    p_vci_ixr.cfixed  = false;

    if ( r_ixr_cmd_fsm.read() == IXR_CMD_READ_NLINE ) {
        // single-flit line read request on behalf of the READ FSM
        p_vci_ixr.cmd     = vci_param::CMD_READ;
        p_vci_ixr.cmdval  = true;
        p_vci_ixr.address = (addr_t)(r_read_to_ixr_cmd_nline.read()*m_words*4);
        p_vci_ixr.plen    = m_words*4;
        p_vci_ixr.wdata   = 0x00000000;
        p_vci_ixr.trdid   = r_read_to_ixr_cmd_trdid.read();
        p_vci_ixr.eop     = true;
    }
    else if ( r_ixr_cmd_fsm.read() == IXR_CMD_SC_NLINE ) {
        if(r_sc_to_ixr_cmd_write.read()){
            // multi-flit line write-back (SC): one data word per cycle
            p_vci_ixr.cmd     = vci_param::CMD_WRITE;
            p_vci_ixr.cmdval  = true;
            p_vci_ixr.address = (addr_t)((r_sc_to_ixr_cmd_nline.read()*m_words+r_ixr_cmd_cpt.read())*4);
            p_vci_ixr.plen    = m_words*4;
            p_vci_ixr.wdata   = r_sc_to_ixr_cmd_data[r_ixr_cmd_cpt.read()].read();
            p_vci_ixr.trdid   = r_sc_to_ixr_cmd_trdid.read();
            p_vci_ixr.eop     = (r_ixr_cmd_cpt == (m_words-1));
        } else {
            // line read (SC miss)
            p_vci_ixr.cmd     = vci_param::CMD_READ;
            p_vci_ixr.cmdval  = true;
            p_vci_ixr.address = (addr_t)(r_sc_to_ixr_cmd_nline.read()*m_words*4);
            p_vci_ixr.plen    = m_words*4;
            p_vci_ixr.wdata   = 0x00000000;
            p_vci_ixr.trdid   = r_sc_to_ixr_cmd_trdid.read();
            p_vci_ixr.eop     = true;
        }
    }
    else if ( r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_NLINE ) {
        if(r_write_to_ixr_cmd_write.read()){
            // multi-flit line write-back (WRITE)
            p_vci_ixr.cmd     = vci_param::CMD_WRITE;
            p_vci_ixr.cmdval  = true;
            p_vci_ixr.address = (addr_t)((r_write_to_ixr_cmd_nline.read()*m_words+r_ixr_cmd_cpt.read())*4);
            p_vci_ixr.plen    = m_words*4;
            p_vci_ixr.wdata   = r_write_to_ixr_cmd_data[r_ixr_cmd_cpt.read()].read();
            p_vci_ixr.trdid   = r_write_to_ixr_cmd_trdid.read();
            p_vci_ixr.eop     = (r_ixr_cmd_cpt == (m_words-1));
        } else {
            // line read (WRITE miss)
            p_vci_ixr.cmd     = vci_param::CMD_READ;
            p_vci_ixr.cmdval  = true;
            p_vci_ixr.address = (addr_t)(r_write_to_ixr_cmd_nline.read()*m_words*4);
            p_vci_ixr.plen    = m_words*4;
            p_vci_ixr.wdata   = 0x00000000;
            p_vci_ixr.trdid   = r_write_to_ixr_cmd_trdid.read();
            p_vci_ixr.eop     = true;
        }
    }
    else if ( r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_DATA ) {
        // write-back of a victim line evicted by XRAM_RSP
        p_vci_ixr.cmd     = vci_param::CMD_WRITE;
        p_vci_ixr.cmdval  = true;
        p_vci_ixr.address = (addr_t)((r_xram_rsp_to_ixr_cmd_nline.read()*m_words+r_ixr_cmd_cpt.read())*4);
        p_vci_ixr.plen    = m_words*4;
        p_vci_ixr.wdata   = r_xram_rsp_to_ixr_cmd_data[r_ixr_cmd_cpt.read()].read();
        p_vci_ixr.trdid   = r_xram_rsp_to_ixr_cmd_trdid.read();
        p_vci_ixr.eop     = (r_ixr_cmd_cpt == (m_words-1));
    } else {
        // no pending external-RAM command
        p_vci_ixr.cmdval  = false;
        p_vci_ixr.address = 0;
        p_vci_ixr.plen    = 0;
        p_vci_ixr.wdata   = 0;
        p_vci_ixr.trdid   = 0;
        p_vci_ixr.eop     = false;
    }

    ////////////////////////////////////////////////////
    // Response signals on the p_vci_ixr port
    ////////////////////////////////////////////////////
    // accept a response flit when IXR_RSP can consume it (needs the TRT for
    // data flits, none for the ACK flit)
    if ( ((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) &&
          (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) ||
         (r_ixr_rsp_fsm.read() == IXR_RSP_ACK) )
        p_vci_ixr.rspack = true;
    else
        p_vci_ixr.rspack = false;

    ////////////////////////////////////////////////////
    // Command signals on the p_vci_tgt port
    ////////////////////////////////////////////////////
    // a command flit is accepted only when the corresponding input FIFO
    // has room (wok)
    switch ((tgt_cmd_fsm_state_e)r_tgt_cmd_fsm.read()) {
        case TGT_CMD_IDLE:
            p_vci_tgt.cmdack  = false;
            break;
        case TGT_CMD_READ:
            p_vci_tgt.cmdack  = m_cmd_read_addr_fifo.wok();
            break;
        case TGT_CMD_WRITE:
            p_vci_tgt.cmdack  = m_cmd_write_addr_fifo.wok();
            break;
        case TGT_CMD_ATOMIC:
            p_vci_tgt.cmdack  = m_cmd_sc_addr_fifo.wok();
            break;
        default:
            p_vci_tgt.cmdack = false;
            break;
    }

    ////////////////////////////////////////////////////
    // Response signals on the p_vci_tgt port
    ////////////////////////////////////////////////////
    switch ( r_tgt_rsp_fsm.read() ) {
        case TGT_RSP_READ_IDLE:
        case TGT_RSP_WRITE_IDLE:
        case TGT_RSP_SC_IDLE:
        case TGT_RSP_XRAM_IDLE:
        case TGT_RSP_INIT_IDLE:
        case TGT_RSP_CLEANUP_IDLE:
            // no response pending
            p_vci_tgt.rspval  = false;
            p_vci_tgt.rsrcid  = 0;
            p_vci_tgt.rdata   = 0;
            p_vci_tgt.rpktid  = 0;
            p_vci_tgt.rtrdid  = 0;
            p_vci_tgt.rerror  = 0;
            p_vci_tgt.reop    = false;
            break;
        case TGT_RSP_READ:
            // multi-flit read response, one word per cycle indexed by r_tgt_rsp_cpt
            // NOTE(review): r_read_to_tgt_rsp_length is used without .read()
            // here (the XRAM case below uses .read()) - confirm intended.
            p_vci_tgt.rspval  = true;
            p_vci_tgt.rdata   = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
            p_vci_tgt.rsrcid  = r_read_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid  = r_read_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid  = r_read_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror  = 0;
            p_vci_tgt.reop    = ( r_tgt_rsp_cpt.read() == (r_read_to_tgt_rsp_word.read()+r_read_to_tgt_rsp_length-1) );
            break;
        case
TGT_RSP_WRITE:
            // single-flit write response
            p_vci_tgt.rspval  = true;
            p_vci_tgt.rdata   = 0;
            p_vci_tgt.rsrcid  = r_write_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid  = r_write_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid  = r_write_to_tgt_rsp_pktid.read();
            // masked by the error-field width (yields 0 when vci_param::E < 2)
            p_vci_tgt.rerror  = 0x2 & ( (1 << vci_param::E) - 1);
            p_vci_tgt.reop    = true;
            break;
        case TGT_RSP_CLEANUP:
            p_vci_tgt.rspval  = true;
            p_vci_tgt.rdata   = 0;
            p_vci_tgt.rsrcid  = r_cleanup_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid  = r_cleanup_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid  = r_cleanup_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror  = 0; // Can be a SC rsp
            p_vci_tgt.reop    = true;
            break;
        case TGT_RSP_SC:
            // store-conditional response (data = success/failure status)
            p_vci_tgt.rspval  = true;
            p_vci_tgt.rdata   = r_sc_to_tgt_rsp_data.read();
            p_vci_tgt.rsrcid  = r_sc_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid  = r_sc_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid  = r_sc_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror  = 0;
            p_vci_tgt.reop    = true;
            break;
        case TGT_RSP_XRAM:
            // multi-flit miss response; terminated early on error
            p_vci_tgt.rspval  = true;
            p_vci_tgt.rdata   = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
            p_vci_tgt.rsrcid  = r_xram_rsp_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid  = r_xram_rsp_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid  = r_xram_rsp_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror  = r_xram_rsp_to_tgt_rsp_rerror.read();
            p_vci_tgt.reop    = (( r_tgt_rsp_cpt.read()
                                   == (r_xram_rsp_to_tgt_rsp_word.read()+r_xram_rsp_to_tgt_rsp_length.read()-1))
                                 || r_xram_rsp_to_tgt_rsp_rerror.read());
            break;
        case TGT_RSP_INIT:
            p_vci_tgt.rspval  = true;
            p_vci_tgt.rdata   = 0;
            p_vci_tgt.rsrcid  = r_init_rsp_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid  = r_init_rsp_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid  = r_init_rsp_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror  = 0; // Can be a SC rsp
            p_vci_tgt.reop    = true;
            break;
    } // end switch r_tgt_rsp_fsm

    ///////////////////////////////////////////////////
    // Command signals on the p_vci_ini port
    // (coherence requests - updates / invalidations - to the L1 caches)
    ///////////////////////////////////////////////////
    // constant fields
    p_vci_ini.cmd     = vci_param::CMD_WRITE;
    p_vci_ini.srcid   = m_srcid_ini;
    p_vci_ini.cons    = true;
    p_vci_ini.wrap    = false;
    p_vci_ini.contig  = false;
    p_vci_ini.clen    = 0;
    p_vci_ini.cfixed  = false;

    // target address built from the destination srcid (segment-per-initiator mapping)
    vci_addr_t vci_ini_address = 0;
    switch ( r_init_cmd_fsm.read() ) {
        case INIT_CMD_UPDT_IDLE:
        case INIT_CMD_INVAL_IDLE:
        case INIT_CMD_SC_UPDT_IDLE:
            p_vci_ini.cmdval  = false;
            p_vci_ini.address = 0;
            p_vci_ini.wdata   = 0;
            p_vci_ini.be      = 0;
            p_vci_ini.plen    = 0;
            p_vci_ini.trdid   = 0;
            p_vci_ini.pktid   = 0;
            p_vci_ini.eop     = false;
            break;
        case INIT_CMD_INVAL_NLINE:
        {
            vci_ini_address = (vci_addr_t)
                m_xram_rsp_to_init_cmd_srcid_fifo.read() << (vci_param::N - vci_param::S);
            p_vci_ini.cmdval  = m_xram_rsp_to_init_cmd_inst_fifo.rok();
            if(m_xram_rsp_to_init_cmd_inst_fifo.rok()){
                // offset +4 selects the instruction-cache invalidate register
                if(m_xram_rsp_to_init_cmd_inst_fifo.read()) {
                    p_vci_ini.address = (addr_t) (vci_ini_address+4);
                } else {
                    p_vci_ini.address = (addr_t) (vci_ini_address);
                }
            } else p_vci_ini.address = 0; // prevent segmentation faults by reading an empty fifo
            p_vci_ini.wdata   = (uint32_t)r_xram_rsp_to_init_cmd_nline.read();
            p_vci_ini.be      = ((r_xram_rsp_to_init_cmd_nline.read() >> 32) & 0x3);
            p_vci_ini.plen    = 4;
            p_vci_ini.trdid   = r_xram_rsp_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = m_xram_rsp_to_init_cmd_cache_id_fifo.read();
            p_vci_ini.eop     = true;
            break;
        }
        case INIT_CMD_XRAM_BRDCAST:
            // broadcast invalidate after a line eviction
            p_vci_ini.cmdval  = true;
            p_vci_ini.address = m_broadcast_address;
            p_vci_ini.wdata   = (uint32_t)r_xram_rsp_to_init_cmd_nline.read();
            p_vci_ini.be      = ((r_xram_rsp_to_init_cmd_nline.read() >> 32) & 0x3);
            p_vci_ini.plen    = 4;
            p_vci_ini.trdid   = r_xram_rsp_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = 0;
            p_vci_ini.eop     = true;
            break;
        case INIT_CMD_WRITE_BRDCAST:
            // broadcast invalidate triggered by the WRITE FSM
            p_vci_ini.cmdval  = true;
            p_vci_ini.address = m_broadcast_address;
            p_vci_ini.wdata   = (addr_t)r_write_to_init_cmd_nline.read();
            p_vci_ini.be      = ((r_write_to_init_cmd_nline.read() >> 32) & 0x3);
            p_vci_ini.plen    = 4 ;
            p_vci_ini.eop     = true;
            p_vci_ini.trdid   = r_write_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = 0;
            break;
        case INIT_CMD_UPDT_NLINE:
            // first flit of a multi-flit update: the line index
            vci_ini_address = (vci_addr_t)
                m_write_to_init_cmd_srcid_fifo.read() << (vci_param::N - vci_param::S);
            p_vci_ini.cmdval  = m_write_to_init_cmd_inst_fifo.rok();
            if(m_write_to_init_cmd_inst_fifo.rok()){
                // offset +12 = instruction-cache update reg, +8 = data-cache update reg
                if(m_write_to_init_cmd_inst_fifo.read()) {
                    p_vci_ini.address = (addr_t)(vci_ini_address + 12);
                } else {
                    p_vci_ini.address = (addr_t)(vci_ini_address + 8);
                }
            } else {
                p_vci_ini.address = 0;
            }
            p_vci_ini.wdata   = (uint32_t)r_write_to_init_cmd_nline.read();
            p_vci_ini.be      = ((r_write_to_init_cmd_nline.read() >> 32 ) & 0x3);
            p_vci_ini.plen    = 4 * (r_write_to_init_cmd_count.read() + 2);
            p_vci_ini.eop     = false;
            p_vci_ini.trdid   = r_write_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = m_write_to_init_cmd_cache_id_fifo.read();
            break;
        case INIT_CMD_UPDT_INDEX:
            // second flit: index of the first updated word in the line
            vci_ini_address = (vci_addr_t)
                m_write_to_init_cmd_srcid_fifo.read() << (vci_param::N - vci_param::S);
            p_vci_ini.cmdval  = true;
            if(m_write_to_init_cmd_inst_fifo.read()) {
                p_vci_ini.address = (addr_t)(vci_ini_address + 12);
            } else {
                p_vci_ini.address = (addr_t)(vci_ini_address + 8);
            }
            p_vci_ini.wdata   = r_write_to_init_cmd_index.read();
            p_vci_ini.be      = 0xF;
            p_vci_ini.plen    = 4 * (r_write_to_init_cmd_count.read() + 2);
            p_vci_ini.trdid   = r_write_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = m_write_to_init_cmd_cache_id_fifo.read();
            p_vci_ini.eop     = false;
            break;
        case INIT_CMD_UPDT_DATA:
            // data flits: one updated word (+ byte enables) per cycle
            vci_ini_address = (vci_addr_t)
                m_write_to_init_cmd_srcid_fifo.read() << (vci_param::N - vci_param::S);
            p_vci_ini.cmdval  = true;
            if(m_write_to_init_cmd_inst_fifo.read()) {
                p_vci_ini.address = (addr_t)(vci_ini_address + 12);
            } else {
                p_vci_ini.address = (addr_t)(vci_ini_address + 8);
            }
            p_vci_ini.wdata   = r_write_to_init_cmd_data[r_init_cmd_cpt.read() +
                                                         r_write_to_init_cmd_index.read()].read();
            p_vci_ini.be      = r_write_to_init_cmd_be[r_init_cmd_cpt.read() +
                                                       r_write_to_init_cmd_index.read()].read() ;
            p_vci_ini.plen    = 4 * (r_write_to_init_cmd_count.read() + 2);
            p_vci_ini.trdid   = r_write_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = m_write_to_init_cmd_cache_id_fifo.read();
            p_vci_ini.eop     = ( r_init_cmd_cpt.read() == (r_write_to_init_cmd_count.read()-1) );
break;
        case INIT_CMD_SC_BRDCAST:
            // broadcast invalidate triggered by the SC FSM
            p_vci_ini.cmdval  = true;
            p_vci_ini.address = m_broadcast_address;
            p_vci_ini.wdata   = (addr_t)r_sc_to_init_cmd_nline.read();
            p_vci_ini.be      = ((r_sc_to_init_cmd_nline.read() >> 32) & 0x3);
            p_vci_ini.plen    = 4 ;
            p_vci_ini.eop     = true;
            p_vci_ini.trdid   = r_sc_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = 0;
            break;
        case INIT_CMD_SC_UPDT_NLINE:
            // first flit of an SC update: the line index
            vci_ini_address = (vci_addr_t)
                m_sc_to_init_cmd_srcid_fifo.read() << (vci_param::N - vci_param::S);
            p_vci_ini.cmdval  = m_sc_to_init_cmd_inst_fifo.rok();
            if(m_sc_to_init_cmd_inst_fifo.rok()){
                // offset +12 = instruction-cache update reg, +8 = data-cache update reg
                if( m_sc_to_init_cmd_inst_fifo.read() ) {
                    p_vci_ini.address = (addr_t)(vci_ini_address + 12);
                } else {
                    p_vci_ini.address = (addr_t)(vci_ini_address + 8);
                }
            } else {
                p_vci_ini.address = 0;
            }
            p_vci_ini.wdata   = (uint32_t)r_sc_to_init_cmd_nline.read();
            p_vci_ini.be      = ((r_sc_to_init_cmd_nline.read() >> 32 ) & 0x3);
            // 4 flits for a 64-bit (long) SC update, 3 for a 32-bit one
            if(r_sc_to_init_cmd_is_long.read()){
                p_vci_ini.plen    = 4 * 4;
            } else {
                p_vci_ini.plen    = 4 * 3;
            }
            p_vci_ini.eop     = false;
            p_vci_ini.trdid   = r_sc_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = m_sc_to_init_cmd_cache_id_fifo.read();
            break;
        case INIT_CMD_SC_UPDT_INDEX:
            // second flit: word index within the line
            vci_ini_address = (vci_addr_t)
                m_sc_to_init_cmd_srcid_fifo.read() << (vci_param::N - vci_param::S);
            p_vci_ini.cmdval  = true;
            if( m_sc_to_init_cmd_inst_fifo.read() ) {
                p_vci_ini.address = (addr_t)(vci_ini_address + 12);
            } else {
                p_vci_ini.address = (addr_t)(vci_ini_address + 8);
            }
            p_vci_ini.wdata   = r_sc_to_init_cmd_index.read();
            p_vci_ini.be      = 0xF;
            if(r_sc_to_init_cmd_is_long.read()){
                p_vci_ini.plen    = 4 * 4;
            } else {
                p_vci_ini.plen    = 4 * 3;
            }
            p_vci_ini.trdid   = r_sc_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = m_sc_to_init_cmd_cache_id_fifo.read();
            p_vci_ini.eop     = false;
            break;
        case INIT_CMD_SC_UPDT_DATA:
            // third flit: low data word (last flit unless a long SC)
            vci_ini_address = (vci_addr_t)
                m_sc_to_init_cmd_srcid_fifo.read() << (vci_param::N - vci_param::S);
            p_vci_ini.cmdval  = true;
            if( m_sc_to_init_cmd_inst_fifo.read() ) {
                p_vci_ini.address = (addr_t)(vci_ini_address + 12);
            } else {
                p_vci_ini.address = (addr_t)(vci_ini_address + 8);
            }
            p_vci_ini.wdata   = r_sc_to_init_cmd_wdata.read();
            p_vci_ini.be      = 0xF;
            p_vci_ini.trdid   = r_sc_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = m_sc_to_init_cmd_cache_id_fifo.read();
            if(r_sc_to_init_cmd_is_long.read()){
                p_vci_ini.plen    = 4 * 4;
                p_vci_ini.eop     = false;
            } else {
                p_vci_ini.plen    = 4 * 3;
                p_vci_ini.eop     = true;
            }
            break;
        case INIT_CMD_SC_UPDT_DATA_HIGH:
            // fourth flit (long SC only): high data word
            vci_ini_address = (vci_addr_t)
                m_sc_to_init_cmd_srcid_fifo.read() << (vci_param::N - vci_param::S);
            p_vci_ini.cmdval  = true;
            if( m_sc_to_init_cmd_inst_fifo.read() ) {
                p_vci_ini.address = (addr_t)(vci_ini_address + 12);
            } else {
                p_vci_ini.address = (addr_t)(vci_ini_address + 8);
            }
            p_vci_ini.wdata   = r_sc_to_init_cmd_wdata_high.read();
            p_vci_ini.be      = 0xF;
            p_vci_ini.plen    = 4 * 4;
            p_vci_ini.trdid   = r_sc_to_init_cmd_trdid.read();
            p_vci_ini.pktid   = m_sc_to_init_cmd_cache_id_fifo.read();
            p_vci_ini.eop     = true;
            break;
    } // end switch r_init_cmd_fsm

    //////////////////////////////////////////////////////
    // Response signals on the p_vci_ini port
    //////////////////////////////////////////////////////
    // coherence acknowledgements are consumed only when INIT_RSP is idle
    if ( r_init_rsp_fsm.read() == INIT_RSP_IDLE ) p_vci_ini.rspack = true;
    else                                          p_vci_ini.rspack = false;

    //////////////////////////////////////////////////////
    // Response signals on the p_vci_tgt_cleanup port
    //////////////////////////////////////////////////////
    // default values, overridden below by the CLEANUP FSM state
    p_vci_tgt_cleanup.rspval = false;
    p_vci_tgt_cleanup.rsrcid = 0;
    p_vci_tgt_cleanup.rdata  = 0;
    p_vci_tgt_cleanup.rpktid = 0;
    p_vci_tgt_cleanup.rtrdid = 0;
    p_vci_tgt_cleanup.rerror = 0;
    p_vci_tgt_cleanup.reop   = false;
    p_vci_tgt_cleanup.cmdack = false ;

    switch(r_cleanup_fsm.read()){
        case CLEANUP_IDLE:
        {
            // a cleanup command is accepted only in IDLE
            p_vci_tgt_cleanup.cmdack = true ;
            break;
        }
        case CLEANUP_RSP:
        {
            p_vci_tgt_cleanup.rspval = true;
            p_vci_tgt_cleanup.rdata  = 0;
            p_vci_tgt_cleanup.rsrcid = r_cleanup_srcid.read();
            p_vci_tgt_cleanup.rpktid = r_cleanup_pktid.read();
            p_vci_tgt_cleanup.rtrdid = r_cleanup_trdid.read();
            // masked by the error-field width (yields 0 when vci_param::E < 2)
            p_vci_tgt_cleanup.rerror = 0x2 & ( (1 << vci_param::E) - 1);
            p_vci_tgt_cleanup.reop   = 1;
            break;
        }
    }

} // end genMoore()

}} // end name space

// Local Variables:
// tab-width: 2
// c-basic-offset: 2
// c-file-offsets:((innamespace . 0)(inline-open . 0))
// indent-tabs-mode: nil
// End:

// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=4:softtabstop=4