Changeset 469 for trunk/kernel/mm


Timestamp: Aug 20, 2018, 1:04:16 PM
Author: alain
Message:

1) Introduce the libsemaphore library.
2) Introduce a small libmath library, required by the "fft" application.
3) Introduce the multithreaded "fft" application.
4) Fix a bad synchronisation bug in the Copy-On-Write mechanism (a sketch of the corrected locking pattern follows this list).
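
For context on item 4: the fix serializes every test-and-modify of the page descriptor's "forks" counter behind the new per-page spinlock. A minimal sketch of the corrected pattern, using only the remote_spinlock / hal_remote primitives and the page_t fields that appear in the diffs below (error handling and the actual page copy are elided):

    // Test-and-modify of the remote "forks" counter, as done in vmm_get_pte().
    xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );  // extended pointer on counter
    xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );   // extended pointer on its lock

    remote_spinlock_lock( lock_xp );              // close the race window
    uint32_t forks = hal_remote_lw( forks_xp );   // read pending forks
    if( forks )                                   // pending fork => copy the page
    {
        // ... allocate a new page and copy the old one ...
        hal_remote_atomic_add( forks_xp , -1 );   // decrement while the lock is held
    }
    remote_spinlock_unlock( lock_xp );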

Location: trunk/kernel/mm
Files: 3 edited

Legend: unchanged lines carry no marker; "+" marks added lines, "-" marks removed lines, "…" marks elided lines.
  • trunk/kernel/mm/page.h

    r457 → r469

      * This structure defines a physical page descriptor.
      * Size is 64 bytes for a 32 bits core...
    + * The spinlock is used to test/modify the forks counter.
      * TODO : the list of waiting threads seems to be unused [AG]
    - * TODO : the spinlock use has to be clarified [AG]
    + * TODO : the refcount use has to be clarified
      ************************************************************************************/
    
    …
          uint32_t          refcount;       /*! reference counter                    (4)  */
          uint32_t          forks;          /*! number of pending forks              (4)  */
    -     spinlock_t        lock;           /*! To Be Defined [AG]                   (16) */
    +     spinlock_t        lock;           /*! protect the forks field              (4)  */
      }
      page_t;
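
Since a page descriptor lives in the cluster that owns the physical page, other clusters reach the forks and lock fields through extended pointers. A hedged sketch of that access discipline, reusing the XPTR / GET_CXY / GET_PTR macros from the vmm.c hunks below; the helper name page_forks_increment is hypothetical:

    // Hypothetical helper: increment the forks counter of a (possibly remote)
    // page descriptor under its protecting spinlock.
    static inline void page_forks_increment( xptr_t page_xp )
    {
        cxy_t    page_cxy = GET_CXY( page_xp );    // owning cluster
        page_t * page_ptr = GET_PTR( page_xp );    // pointer valid in owning cluster
        xptr_t   forks_xp = XPTR( page_cxy , &page_ptr->forks );
        xptr_t   lock_xp  = XPTR( page_cxy , &page_ptr->lock );

        remote_spinlock_lock( lock_xp );           // serialize with the COW path
        hal_remote_atomic_add( forks_xp , 1 );     // remote increment
        remote_spinlock_unlock( lock_xp );
    }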
  • trunk/kernel/mm/vmm.c

    r457 → r469

                     page_t   * page_ptr;
                     xptr_t     forks_xp;
    +                xptr_t     lock_xp;
    
                     // update flags in remote GPT
    …
                             if( attr & GPT_MAPPED )
                             {
    +                            // get pointers and cluster on page descriptor
                                 page_xp  = ppm_ppn2page( ppn );
                                 page_cxy = GET_CXY( page_xp );
                                 page_ptr = GET_PTR( page_xp );
    +
    +                            // get extended pointers on "forks" and "lock"
                                 forks_xp = XPTR( page_cxy , &page_ptr->forks );
    +                            lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    +
    +                            // increment "forks"
    +                            remote_spinlock_lock( lock_xp );
                                 hal_remote_atomic_add( forks_xp , 1 );
    +                            remote_spinlock_unlock( lock_xp );
                             }
                         }   // end loop on vpn
     
        vpn_t       vpn_base;
        vpn_t       vpn_size;
    -   xptr_t      page_xp;
    +   xptr_t      page_xp;        // extended pointer on page descriptor
        page_t    * page_ptr;
        cxy_t       page_cxy;
    +   xptr_t      forks_xp;       // extended pointer on forks counter in page descriptor
    +   xptr_t      lock_xp;        // extended pointer on lock protecting the forks counter
        xptr_t      parent_root_xp;
        bool_t      mapped;
    …
                        if( mapped )
                        {
    -                       page_xp = ppm_ppn2page( ppn );
    +                       // get pointers and cluster on page descriptor
    +                       page_xp  = ppm_ppn2page( ppn );
                            page_cxy = GET_CXY( page_xp );
                            page_ptr = GET_PTR( page_xp );
    -                       hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
    +
    +                       // get extended pointers on "forks" and "lock"
    +                       forks_xp = XPTR( page_cxy , &page_ptr->forks );
    +                       lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    +
    +                       // increment "forks"
    +                       remote_spinlock_lock( lock_xp );
    +                       hal_remote_atomic_add( forks_xp , 1 );
    +                       remote_spinlock_unlock( lock_xp );
    
     #if DEBUG_VMM_FORK_COPY
    …
     __FUNCTION__ , CURRENT_THREAD , vpn , cycle );
     #endif
    -
                        }
                    }
     
     if( DEBUG_VMM_DESTROY < cycle )
     printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    -__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
    +__FUNCTION__, CURRENT_THREAD->trdid, process->pid, local_cxy, cycle );
     #endif
    
    …
             vseg    = GET_PTR( vseg_xp );
    
    -#if( DEBUG_VMM_DESTROY & 1 )
    -if( DEBUG_VMM_DESTROY < cycle )
    -printk("\n[DBG] %s : found %s vseg / vpn_base %x / vpn_size %d\n",
    -__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
    -#endif
             // unmap and release physical pages
             vmm_unmap_vseg( process , vseg );
    …
     if( DEBUG_VMM_DESTROY < cycle )
     printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    -__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
    +__FUNCTION__, CURRENT_THREAD->trdid, process->pid, local_cxy , cycle );
     #endif
    
     
        page_t    * page_ptr;   // page descriptor pointer
        xptr_t      forks_xp;   // extended pointer on pending forks counter
    -   uint32_t    count;      // actual number of pending forks
    +   xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
    +   uint32_t    forks;      // actual number of pending forks
    
     #if DEBUG_VMM_UNMAP_VSEG
    …
                    page_ptr = GET_PTR( page_xp );
    
    -               // FIXME lock the physical page
    +               // get extended pointers on forks and lock fields
    +               forks_xp = XPTR( page_cxy , &page_ptr->forks );
    +               lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    +
    +               // get lock protecting page descriptor
    +               remote_spinlock_lock( lock_xp );
    
                    // get pending forks counter
    -               count = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ) );
    +               forks = hal_remote_lw( forks_xp );
    
    -               if( count )  // decrement pending forks counter
    +               if( forks )  // decrement pending forks counter
                    {
    -                   forks_xp = XPTR( page_cxy , &page_ptr->forks );
                        hal_remote_atomic_add( forks_xp , -1 );
                    }
    …
                    }
    
    -               // FIXME unlock the physical page
    +               // release lock protecting page descriptor
    +               remote_spinlock_unlock( lock_xp );
                }
            }
     
     #if DEBUG_VMM_GET_ONE_PPN
     thread_t * this = CURRENT_THREAD;
    -// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    -if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
    +if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n",
     __FUNCTION__, this, vpn, vseg_type_str(type), index );
     
    
     #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    -if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
    -// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    +if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n",
     __FUNCTION__, this, vpn, elf_offset );
     
    
     #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    -// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    -if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
    +if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n",
     __FUNCTION__, this, vpn );
     
    
     #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    -// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    -if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
    +if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n",
     __FUNCTION__, this, vpn );
     
    
     #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    -// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    -if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
    +if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n"
     "      %d bytes from mapper / %d bytes from BSS\n",
     
    
     #if DEBUG_VMM_GET_ONE_PPN
    -// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    -if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
    +if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n",
     __FUNCTION__ , this , vpn , *ppn );
     
     #if DEBUG_VMM_GET_PTE
     uint32_t   cycle = (uint32_t)hal_get_cycles();
    -// if( DEBUG_VMM_GET_PTE < cycle )
    -if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
    -printk("\n[DBG] %s : thread %x enter / vpn %x / process %x / cow %d / cycle %d\n",
    -__FUNCTION__ , this , vpn , process->pid , cow , cycle );
    +if( DEBUG_VMM_GET_PTE < cycle )
    +printk("\n[DBG] %s : thread %x in process %x enter / vpn %x / cow %d / cycle %d\n",
    +__FUNCTION__, this->trdid, process->pid, vpn, cow, cycle );
     #endif
    
     
        // vseg has been checked by the vmm_handle_page_fault() function
        assert( (vseg != NULL) , __FUNCTION__,
    -   "vseg undefined / vpn %x / thread %x / process %x / core[%x,%d] / cycle %d\n",
    -   vpn, this, process->pid, local_cxy, this->core->lid,
    +   "vseg undefined / vpn %x / thread %x in process %x / core[%x,%d] / cycle %d\n",
    +   vpn, this->trdid, process->pid, local_cxy, this->core->lid,
        (uint32_t)hal_get_cycles() );
    
     
    
            assert( (old_attr & GPT_MAPPED), __FUNCTION__,
    -       "PTE unmapped for a COW exception / vpn %x / thread %x / process %x / cycle %d\n",
    +       "PTE unmapped for a COW exception / vpn %x / thread %x in process %x / cycle %d\n",
            vpn, this, process->pid, (uint32_t)hal_get_cycles() );
    
     #if( DEBUG_VMM_GET_PTE & 1 )
    -// if( DEBUG_VMM_GET_PTE < cycle )
    -if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
    -printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
    -__FUNCTION__, this, vpn, process->pid );
    +if( DEBUG_VMM_GET_PTE < cycle )
    +printk("\n[DBG] %s : thread %x in process %x handling COW for vpn %x\n",
    +__FUNCTION__, this->trdid, process->pid, vpn );
     #endif
    
     
            page_t * page_ptr = GET_PTR( page_xp );
    
    +       // get extended pointers on forks and lock field in page descriptor
    +       xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
    +       xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    +
    +       // take lock protecting page descriptor
    +       remote_spinlock_lock( lock_xp );
    +
            // get number of pending forks in page descriptor
    -       uint32_t forks = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ) );
    +       uint32_t forks = hal_remote_lw( forks_xp );
    
            if( forks )        // pending fork => allocate a new page, copy old to new
    …
                        GET_PTR( old_base_xp ),
                        CONFIG_PPM_PAGE_SIZE );
    +
    +            // decrement pending forks counter in page descriptor
    +            hal_remote_atomic_add( forks_xp , -1 );
            }
    -       else               // no pending fork => keep the existing page, reset COW
    +       else               // no pending fork => keep the existing page
            {
                new_ppn = old_ppn;
            }
    
    +       // release lock protecting page descriptor
    +       remote_spinlock_unlock( lock_xp );
    +
            // build new_attr : reset COW and set WRITABLE,
            new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW);
    …
            // update GPT[vpn] for all GPT copies
            vmm_global_update_pte( process, vpn, new_attr, new_ppn );
    -
    -       // decrement pending forks counter in page descriptor
    -       hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
        }
        else        //////////// page_fault request ///////////////////////////
     
    
     #if( DEBUG_VMM_GET_PTE & 1 )
    -// if( DEBUG_VMM_GET_PTE < cycle )
    -if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
    -printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
    -__FUNCTION__, this, vpn, process->pid );
    +if( DEBUG_VMM_GET_PTE < cycle )
    +printk("\n[DBG] %s : thread %x in process %x handling page fault for vpn %x\n",
    +__FUNCTION__, this->trdid, process->pid, vpn );
     #endif
                // allocate new_ppn, and initialize the new page
     
     #if DEBUG_VMM_GET_PTE
     cycle = (uint32_t)hal_get_cycles();
    -// if( DEBUG_VMM_GET_PTE < cycle )
    -if( (vpn == 0x403) && (local_cxy == 0) )
    -printk("\n[DBG] %s : thread %x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n",
    -__FUNCTION__, this, vpn, process->pid, new_ppn, new_attr, cycle );
    +if( DEBUG_VMM_GET_PTE < cycle )
    +printk("\n[DBG] %s : thread %x in process %x exit / vpn %x / ppn %x / attr %x / cycle %d\n",
    +__FUNCTION__, this->trdid, process->pid, vpn, new_ppn, new_attr, cycle );
     #endif
    
     
     #if DEBUG_VMM_HANDLE_PAGE_FAULT
     uint32_t cycle = (uint32_t)hal_get_cycles();
    -// if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    -if( (vpn == 0x403) && (local_cxy == 0) )
    +if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
     printk("\n[DBG] %s : thread %x in process %x enter for vpn %x / core[%x,%d] / cycle %d\n",
     __FUNCTION__, this, process->pid, vpn, local_cxy, this->core->lid, cycle );
     
    
     #if DEBUG_VMM_HANDLE_PAGE_FAULT
    -// if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    -if( (vpn == 0x403) && (local_cxy == 0) )
    +if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
     printk("\n[DBG] %s : thread %x in process %x call RPC_VMM_GET_PTE\n",
     __FUNCTION__, this, process->pid );
     
     #if DEBUG_VMM_HANDLE_PAGE_FAULT
     cycle = (uint32_t)hal_get_cycles();
    -// if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    -if( (vpn == 0x403) && (local_cxy == 0) )
    +if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
     printk("\n[DBG] %s : thread %x in process %x exit for vpn %x / core[%x,%d] / cycle %d\n",
     __FUNCTION__, this, process->pid, vpn, local_cxy, this->core->lid, cycle );
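
The essential change in vmm_get_pte() above: in r457 the "forks" counter was read without a lock and then decremented unconditionally after vmm_global_update_pte(), so the copy decision and the decrement could interleave with a concurrent fork or unmap; in r469 the read, the copy decision, and the (now conditional) decrement form a single critical section. A condensed before/after skeleton, extracted from the hunks above:

    // r457 (buggy): unlocked read, unconditional late decrement
    uint32_t forks = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ) );
    if( forks ) { /* allocate a new page and copy the old one */ }
    else        { new_ppn = old_ppn; }
    vmm_global_update_pte( process, vpn, new_attr, new_ppn );
    hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );

    // r469 (fixed): one critical section, decrement only if a fork is pending
    remote_spinlock_lock( lock_xp );
    uint32_t forks = hal_remote_lw( forks_xp );
    if( forks )
    {
        /* allocate a new page and copy the old one */
        hal_remote_atomic_add( forks_xp , -1 );
    }
    else
    {
        new_ppn = old_ppn;
    }
    remote_spinlock_unlock( lock_xp );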
  • trunk/kernel/mm/vmm.h

    r457 → r469

      *   valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set.
      * - no STACK vseg is copied from parent VMM to child VMM, because the child STACK vseg
    - *   must be copied from the cluster containing the user thread requesting the fork().
    + *   must be copied later from the cluster containing the user thread requesting the fork().
      *********************************************************************************************
      * @ child_process     : local pointer on local child process descriptor.