Changeset 408 for trunk/hal/tsar_mips32/core/hal_gpt.c
Timestamp: Dec 5, 2017, 4:20:07 PM
File: trunk/hal/tsar_mips32/core/hal_gpt.c (1 edited)
trunk/hal/tsar_mips32/core/hal_gpt.c (r407 → r408)

@@ -170 +170 @@ argument order fixed in a hal_gpt_set_pte() call
     error = hal_gpt_set_pte( gpt,
                              vpn,
-                             (local_cxy<<20) | (vpn & 0xFFFFF),
-                             attr );
+                             attr,
+                             (local_cxy<<20) | (vpn & 0xFFFFF) );

     if( error )
…
@@ -321 +321 @@ PTE display format
         {
             vpn = (ix1 << 9) | ix2;
-            printk(" - SMALL : vpn = %x / PT2[%d] = %x / pt2[%d] = %x\n",
-                   vpn , 2*ix2 , pte2_attr , 2*ix2+1 , pte2_ppn);
+            printk(" - SMALL : vpn %X / ppn %X / attr %X\n",
+                   vpn , pte2_ppn , tsar2gpt(pte2_attr) );
         }
     }
…
@@ -334 +334 @@ hal_gpt_set_pte() prototype: attr now precedes ppn
 error_t hal_gpt_set_pte( gpt_t    * gpt,
                          vpn_t      vpn,
-                         ppn_t      ppn,
-                         uint32_t   attr )     // generic GPT attributes
+                         uint32_t   attr,      // generic GPT attributes
+                         ppn_t      ppn )
 {
     uint32_t  * pt1;   // PT1 base addres
…
@@ -355 +355 @@ debug trace on entry
     uint32_t    tsar_attr;   // PTE attributes for TSAR MMU

-
-
+gpt_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x / ppn = %x / gpt_attr = %x\n",
+__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , ppn , attr );

     // compute indexes in PT1 and PT2
…
@@ -368 +368 @@ debug trace after attribute translation
     tsar_attr = gpt2tsar( attr );

-
-
+gpt_dmsg("\n[DBG] %s : core[%x,%d] / vpn = %x / &pt1 = %x / tsar_attr = %x\n",
+__FUNCTION__, local_cxy , CURRENT_THREAD->core->lid , vpn , pt1 , tsar_attr );

     // get pointer on PT1[ix1]
…
@@ -400 +400 @@ debug trace on current PTE1
     pte1 = *pte1_ptr;

-
-
+gpt_dmsg("\n[DBG] %s : core[%x,%d] / vpn = %x / current_pte1 = %x\n",
+__FUNCTION__, local_cxy , CURRENT_THREAD->core->lid , vpn , pte1 );

     // allocate a PT2 if PT1 entry not valid
…
@@ -442 +442 @@ debug trace on PT2 pointer
     pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );

-
-
+gpt_dmsg("\n[DBG] %s : core[%x,%d] / vpn = %x / pte1 = %x / &pt2 = %x\n",
+__FUNCTION__, local_cxy , CURRENT_THREAD->core->lid , vpn , pte1 , pt2 );

     }
…
@@ -454 +454 @@ debug trace on exit
     hal_fence();

-
-
-
+gpt_dmsg("\n[DBG] %s : core[%x,%d] exit / vpn = %x / pte2_attr = %x / pte2_ppn = %x\n",
+__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ,
+pt2[2 * ix2] , pt2[2 * ix2 + 1] );

     return 0;
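The prototype hunk (line 334) and the call-site hunk (line 170) are the same fix seen from both sides: r408 passes the generic attributes before the physical page number. A minimal call sketch under the new prototype — the GPT_MAPPED | GPT_SMALL combination is illustrative (only these flags and GPT_WRITABLE are visible in this changeset), and the (local_cxy<<20) | (vpn & 0xFFFFF) expression identity-maps the page in the local cluster, as in the hunk above:

    // r408 argument order: generic attributes before the PPN
    error = hal_gpt_set_pte( gpt,                                  // target GPT
                             vpn,                                  // virtual page number
                             GPT_MAPPED | GPT_SMALL,               // generic attributes
                             (local_cxy<<20) | (vpn & 0xFFFFF) );  // identity-mapped PPN
    if( error ) printk("\n[ERROR] in %s : cannot map vpn %x\n", __FUNCTION__ , vpn );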
@@ -728 +728 @@ the whole-vseg hal_gpt_copy() becomes the per-page hal_gpt_pte_copy()
 }  // end hal_gpt_unlock_pte()

-///////////////////////////////////////
-error_t hal_gpt_copy( gpt_t  * dst_gpt,
-                      gpt_t  * src_gpt,
-                      vpn_t    vpn_base,
-                      vpn_t    vpn_size,
-                      bool_t   cow )
-{
-    vpn_t       vpn;          // current vpn
-
+///////////////////////////////////////////
+error_t hal_gpt_pte_copy( gpt_t  * dst_gpt,
+                          xptr_t   src_gpt_xp,
+                          vpn_t    vpn,
+                          bool_t   cow,
+                          ppn_t  * ppn,
+                          bool_t * mapped )
+{
     uint32_t    ix1;          // index in PT1
     uint32_t    ix2;          // index in PT2

-    uint32_t  * src_pt1;      // local pointer on PT1 for SRC_GPT
-    uint32_t  * dst_pt1;      // local pointer on PT1 for DST_GPT
-    uint32_t  * dst_pt2;      // local pointer on PT2 for DST_GPT
-    uint32_t  * src_pt2;      // local pointer on PT2 for SRC_GPT
+    cxy_t       src_cxy;      // SRC GPT cluster
+    gpt_t     * src_gpt;      // SRC GPT local pointer
+
+    uint32_t  * src_pt1;      // local pointer on SRC PT1
+    uint32_t  * dst_pt1;      // local pointer on DST PT1
+    uint32_t  * src_pt2;      // local pointer on SRC PT2
+    uint32_t  * dst_pt2;      // local pointer on DST PT2

     kmem_req_t  req;          // for dynamic PT2 allocation
…
     uint32_t    dst_pte1;

-    uint32_t    pte2_attr;
-    uint32_t    pte2_ppn;
+    uint32_t    src_pte2_attr;
+    uint32_t    src_pte2_ppn;

     page_t    * page;
…
     ppn_t       dst_pt2_ppn;

-    gpt_dmsg("\n[DBG] %s : core[%x,%d] enter\n",
-    __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
-
-    // check page size
-    assert( (CONFIG_PPM_PAGE_SIZE == 4096) , __FUNCTION__ ,
-    "for TSAR, the page must be 4 Kbytes\n" );
-
-    // check SRC_PT1 and DST_PT1 existence
-    assert( (src_gpt->ptr != NULL) , __FUNCTION__ , "SRC_PT1 does not exist\n");
-    assert( (dst_gpt->ptr != NULL) , __FUNCTION__ , "DST_PT1 does not exist\n");
-
-    // get pointers on SRC_PT1 and DST_PT1
-    src_pt1 = (uint32_t *)src_gpt->ptr;
+    gpt_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn %x\n",
+    __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
+
+    // get remote src_gpt cluster and local pointer
+    src_cxy = GET_CXY( src_gpt_xp );
+    src_gpt = (gpt_t *)GET_PTR( src_gpt_xp );
+
+    // get remote src_pt1 and local dst_pt1
+    src_pt1 = (uint32_t *)hal_remote_lpt( XPTR( src_cxy , &src_gpt->ptr ) );
     dst_pt1 = (uint32_t *)dst_gpt->ptr;

-    // scan pages in vseg
-    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
-    {
-        ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
-        ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
-
-        // get SRC_PT1 entry
-        src_pte1 = src_pt1[ix1];
-
-        // do nothing if SRC_PTE1 unmapped
-        if( (src_pte1 & TSAR_MMU_MAPPED) != 0 )    // SRC_PTE1 is mapped
-        {
-            assert( (src_pte1 & TSAR_MMU_SMALL) , __FUNCTION__ ,
-            "no BIG page for user process in TSAR architecture\n" );
-
-            // get DST_PT1 entry
-            dst_pte1 = dst_pt1[ix1];
-
-            // map dst_pte1 if required
-            if( (dst_pte1 & TSAR_MMU_MAPPED) == 0 )
-            {
-                // allocate one physical page for a new DST_PT2
-                req.type  = KMEM_PAGE;
-                req.size  = 0;                     // 1 small page
-                req.flags = AF_KERNEL | AF_ZERO;
-                page = (page_t *)kmem_alloc( &req );
-
-                if( page == NULL )
-                {
-                    printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
-                    return ENOMEM;
-                }
-
-                // build extended pointer on page descriptor
-                page_xp = XPTR( local_cxy , page );
-
-                // get PPN for this new DST_PT2
-                dst_pt2_ppn = (ppn_t)ppm_page2ppn( page_xp );
-
-                // build the new dst_pte1
-                dst_pte1 = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | dst_pt2_ppn;
-
-                // register it in DST_GPT
-                dst_pt1[ix1] = dst_pte1;
-            }
-
-            // get PPN and pointer on SRC_PT2
-            src_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( src_pte1 );
-            src_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( src_pt2_ppn ) );
-
-            // get PPN and pointer on DST_PT2
-            dst_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( dst_pte1 );
-            dst_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );
-
-            // get attr and ppn from SRC_PT2
-            pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( src_pt2[2 * ix2] );
-            pte2_ppn  = TSAR_MMU_PPN_FROM_PTE2( src_pt2[2 * ix2 + 1] );
-
-            // no copy if SRC_PTE2 unmapped
-            if( (pte2_attr & TSAR_MMU_MAPPED) != 0 )   // valid PTE2 in SRC_GPT
-            {
-                // set a new PTE2 in DST_PT2
-                dst_pt2[2*ix2]     = pte2_attr;
-                dst_pt2[2*ix2 + 1] = pte2_ppn;
-
-                // FIXME increment page descriptor refcount for the referenced page
-
-                // handle Copy-On-Write
-                if( cow && (pte2_attr & TSAR_MMU_WRITABLE) )
-                {
-                    // reset WRITABLE flag in DST_GPT
-                    hal_atomic_and( &dst_pt2[2*ix2] , ~TSAR_MMU_WRITABLE );
-
-                    // set COW flag in DST_GPT
-                    hal_atomic_or( &dst_pt2[2*ix2] , TSAR_MMU_COW );
-                }
-            }
-        }  // end if PTE1 mapped
-    }  // end loop on vpn
-
+    // check src_pt1 and dst_pt1 existence
+    assert( (src_pt1 != NULL) , __FUNCTION__ , "src_pt1 does not exist\n");
+    assert( (dst_pt1 != NULL) , __FUNCTION__ , "dst_pt1 does not exist\n");
+
+    ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
+    ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
+
+    // get src_pte1
+    src_pte1 = hal_remote_lw( XPTR( src_cxy , &src_pt1[ix1] ) );
+
+    // do nothing if src_pte1 not MAPPED or not SMALL
+    if( (src_pte1 & TSAR_MMU_MAPPED) && (src_pte1 & TSAR_MMU_SMALL) )
+    {
+        // get dst_pt1 entry
+        dst_pte1 = dst_pt1[ix1];
+
+        // map dst_pte1 if required
+        if( (dst_pte1 & TSAR_MMU_MAPPED) == 0 )
+        {
+            // allocate one physical page for a new PT2
+            req.type  = KMEM_PAGE;
+            req.size  = 0;                     // 1 small page
+            req.flags = AF_KERNEL | AF_ZERO;
+            page = (page_t *)kmem_alloc( &req );
+
+            if( page == NULL )
+            {
+                printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
+                return -1;
+            }
+
+            // build extended pointer on page descriptor
+            page_xp = XPTR( local_cxy , page );
+
+            // get PPN for this new PT2
+            dst_pt2_ppn = (ppn_t)ppm_page2ppn( page_xp );
+
+            // build the new dst_pte1
+            dst_pte1 = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | dst_pt2_ppn;
+
+            // register it in DST_GPT
+            dst_pt1[ix1] = dst_pte1;
+        }
+
+        // get pointer on src_pt2
+        src_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( src_pte1 );
+        src_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( src_pt2_ppn ) );
+
+        // get pointer on dst_pt2
+        dst_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( dst_pte1 );
+        dst_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );
+
+        // get attr and ppn from SRC_PT2
+        src_pte2_attr = hal_remote_lw( XPTR( src_cxy , &src_pt2[2 * ix2] ) );
+        src_pte2_ppn  = hal_remote_lw( XPTR( src_cxy , &src_pt2[2 * ix2 + 1] ) );
+
+        // do nothing if src_pte2 not MAPPED
+        if( (src_pte2_attr & TSAR_MMU_MAPPED) != 0 )
+        {
+            // set PPN in DST PTE2
+            dst_pt2[2*ix2+1] = src_pte2_ppn;
+
+            // set attributes in DST PTE2
+            if( cow && (src_pte2_attr & TSAR_MMU_WRITABLE) )
+            {
+                dst_pt2[2*ix2] = (src_pte2_attr | TSAR_MMU_COW) & (~TSAR_MMU_WRITABLE);
+            }
+            else
+            {
+                dst_pt2[2*ix2] = src_pte2_attr;
+            }
+
+            // return "successfully copied"
+            *mapped = true;
+            *ppn    = src_pte2_ppn;
+
+            gpt_dmsg("\n[DBG] %s : core[%x,%d] exit for vpn %x / copy done\n",
+            __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
+
+            hal_fence();
+
+            return 0;
+        }  // end if PTE2 mapped
+    }  // end if PTE1 mapped
+
+    // return "nothing done"
+    *mapped = false;
+    *ppn    = 0;
+
+    gpt_dmsg("\n[DBG] %s : core[%x,%d] exit for vpn %x / nothing done\n",
+    __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );

     hal_fence();

-    gpt_dmsg("\n[DBG] %s : core[%x,%d] exit\n",
-    __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
-
     return 0;

-}  // end hal_gpt_copy()
+}  // end hal_gpt_pte_copy()
+
+//////////////////////////////////////////
+bool_t hal_gpt_pte_is_mapped( gpt_t * gpt,
+                              vpn_t   vpn )
+{
+    uint32_t * pt1;
+    uint32_t   pte1;
+    uint32_t   pte2_attr;
+
+    uint32_t * pt2;
+    ppn_t      pt2_ppn;
+
+    uint32_t   ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
+    uint32_t   ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
+
+    // get PTE1 value
+    pt1  = gpt->ptr;
+    pte1 = pt1[ix1];
+
+    if( (pte1 & TSAR_MMU_MAPPED) == 0 ) return false;
+
+    if( (pte1 & TSAR_MMU_SMALL) == 0 ) return false;
+
+    // compute PT2 base address
+    pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
+    pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
+    // get pte2_attr
+    pte2_attr = pt2[2*ix2];
+
+    if( (pte2_attr & TSAR_MMU_MAPPED) == 0 ) return false;
+    else                                     return true;
+
+}  // end hal_gpt_pte_is_mapped()
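hal_gpt_pte_copy() now copies exactly one PTE and reports the outcome through *ppn and *mapped, so the vseg-level loop that hal_gpt_copy() used to contain moves up to the caller. A hypothetical sketch of such a caller — the child_gpt / parent_gpt_xp / vpn_base / vpn_size names are illustrative, not the actual VMM code; note that the FIXME about the page refcount was removed, which this split lets the caller handle:

    // copy the PTEs of one vseg from a (possibly remote) parent GPT
    // into the local child GPT, setting COW on writable pages
    vpn_t    vpn;
    ppn_t    ppn;
    bool_t   mapped;
    error_t  error;

    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
    {
        error = hal_gpt_pte_copy( child_gpt , parent_gpt_xp , vpn , true , &ppn , &mapped );

        if( error ) return error;   // PT2 allocation failed in the child cluster

        if( mapped )                // a PTE was actually copied
        {
            // the caller can now take a reference on the physical page
            // identified by ppn (the refcount the removed FIXME pointed
            // at is no longer the HAL's job)
        }
    }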
@@ -869 +910 @@ hal_gpt_pte_is_cow()
     uint32_t * pt1;
     uint32_t   pte1;
+    uint32_t   pte2_attr;

     uint32_t * pt2;
…
     pte1 = pt1[ix1];

-    if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry not mapped
-    {
-        return false;
-    }
-
-    if( (pte1 & TSAR_MMU_SMALL) == 0 )    // it's a PTE1
-    {
-        return false;
-    }
-    else                                  // it's a PTD1
-    {
-        // compute PT2 base address
-        pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-        pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
-
-        if( pt2[2*ix2] & TSAR_MMU_COW ) return true;
-        else                            return false;
-    }
+    if( (pte1 & TSAR_MMU_MAPPED) == 0 ) return false;
+
+    if( (pte1 & TSAR_MMU_SMALL) == 0 ) return false;
+
+    // compute PT2 base address
+    pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
+    pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
+    // get pte2_attr
+    pte2_attr = pt2[2*ix2];
+
+    if( (CURRENT_THREAD == 0xe0000) && (hal_time_stamp() > 5380000) )
+    printk("\n@@@ %s : vpn = %X / attr = %X\n", __FUNCTION__ , vpn , tsar2gpt( pte2_attr ) );
+
+    if( (pte2_attr & TSAR_MMU_MAPPED) == 0 ) return false;
+
+    if( (pte2_attr & TSAR_MMU_COW) == 0 ) return false;
+    else                                  return true;

 }  // end hal_gpt_pte_is_cow()
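Together, hal_gpt_pte_is_mapped() and hal_gpt_pte_is_cow() let a page-fault handler classify a write fault without decoding TSAR PTEs itself. A hypothetical classification helper — the enum and function below are illustrative, not part of this changeset:

    // hypothetical classification of a write fault on vpn
    typedef enum { FAULT_UNMAPPED , FAULT_IS_COW , FAULT_IS_ERROR } fault_kind_t;

    fault_kind_t classify_write_fault( gpt_t * gpt , vpn_t vpn )
    {
        // unmapped page: demand-paging path
        if( hal_gpt_pte_is_mapped( gpt , vpn ) == false ) return FAULT_UNMAPPED;

        // mapped + COW: duplicate the page, then make it writable again
        if( hal_gpt_pte_is_cow( gpt , vpn ) ) return FAULT_IS_COW;

        // mapped without COW: a real protection violation
        return FAULT_IS_ERROR;
    }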
…
@@ -903 +945 @@ the commented-out "deprecated : old hal_gpt_copy [AG]" block is dropped; hal_gpt_flip_cow() and hal_gpt_update_pte() are added

-/* deprecated : old hal_gpt_copy [AG]
-
-// scan the SRC_PT1
-for( ix1 = 0 ; ix1 < 2048 ; ix1++ )
-{
-    pte1 = src_pt1[ix1];
-    if( (pte1 & TSAR_MMU_MAPPED) != 0 )
-    {
-        if( (pte1 & TSAR_MMU_SMALL) == 0 )    // PTE1 => big kernel page
-        {
-            // big kernel pages are shared by all processes => copy it
-            dst_pt1[ix1] = pte1;
-        }
-        else                                  // PTD1 => smal pages
-        {
-            // allocate one physical page for a PT2 in DST_GPT
-            kmem_req_t req;
-            req.type  = KMEM_PAGE;
-            req.size  = 0;                    // 1 small page
-            req.flags = AF_KERNEL | AF_ZERO;
-            page = (page_t *)kmem_alloc( &req );
-
-            if( page == NULL )
-            {
-                // TODO release all memory allocated to DST_GPT
-                printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
-                return ENOMEM;
-            }
-
-            // get extended pointer on page descriptor
-            page_xp = XPTR( local_cxy , page );
-
-            // get pointer on new PT2 in DST_GPT
-            xptr_t base_xp = ppm_page2base( page_xp );
-            dst_pt2 = (uint32_t *)GET_PTR( base_xp );
-
-            // set a new PTD1 in DST_GPT
-            dst_pt2_ppn  = (ppn_t)ppm_page2ppn( page_xp );
-            dst_pt1[ix1] = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | dst_pt2_ppn;
-
-            // get pointer on PT2 in SRC_GPT
-            src_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-            src_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( src_pt2_ppn ) );
-
-            // scan the SRC_PT2
-            for( ix2 = 0 ; ix2 < 512 ; ix2++ )
-            {
-                // get attr & ppn from PTE2
-                pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( src_pt2[2 * ix2] );
-
-                if( (pte2_attr & TSAR_MMU_MAPPED) != 0 )   // valid PTE2 in SRC_GPT
-                {
-                    // get GPT_WRITABLE & PPN
-                    pte2_writable = pte2_attr & GPT_WRITABLE;
-                    pte2_ppn      = TSAR_MMU_PPN_FROM_PTE2( src_pt2[2 * ix2 + 1] );
-
-                    // set a new PTE2 in DST_GPT
-                    dst_pt2[2*ix2]     = pte2_attr;
-                    dst_pt2[2*ix2 + 1] = pte2_ppn;
-
-                    // handle Copy-On-Write
-                    if( cow && pte2_writable )
-                    {
-                        // reset GPT_WRITABLE in both SRC_GPT and DST_GPT
-                        hal_atomic_and( &dst_pt2[2*ix2] , ~GPT_WRITABLE );
-                        hal_atomic_and( &src_pt2[2*ix2] , ~GPT_WRITABLE );
-
-                        // register PG_COW in page descriptor
-                        page = (page_t *)GET_PTR( ppm_ppn2page( pte2_ppn ) );
-                        hal_atomic_or( &page->flags , PG_COW );
-                        hal_atomic_add( &page->fork_nr , 1 );
-                    }
-                }
-            }  // end loop on ix2
-        }
-    }
-}  // end loop ix1
-
-hal_fence();
-
-return 0;
-
-}  // end hal_gpt_copy()
-
-*/
+/////////////////////////////////////////
+void hal_gpt_flip_cow( bool_t  set_cow,
+                       xptr_t  gpt_xp,
+                       vpn_t   vpn_base,
+                       vpn_t   vpn_size )
+{
+    cxy_t      gpt_cxy;
+    gpt_t    * gpt_ptr;
+
+    vpn_t      vpn;
+
+    uint32_t   ix1;
+    uint32_t   ix2;
+
+    uint32_t * pt1;
+    uint32_t   pte1;
+
+    uint32_t * pt2;
+    ppn_t      pt2_ppn;
+
+    uint32_t   old_attr;
+    uint32_t   new_attr;
+
+    // get GPT cluster and local pointer
+    gpt_cxy = GET_CXY( gpt_xp );
+    gpt_ptr = (gpt_t *)GET_PTR( gpt_xp );
+
+    // get local PT1 pointer
+    pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
+
+    // loop on pages
+    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
+    {
+        ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
+        ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
+
+        // get PTE1 value
+        pte1 = hal_remote_lw( XPTR( gpt_cxy , &pt1[ix1] ) );
+
+        // only MAPPED & SMALL PTEs are modified
+        if( (pte1 & TSAR_MMU_MAPPED) && (pte1 & TSAR_MMU_SMALL) )
+        {
+            // compute PT2 base address
+            pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
+            pt2     = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
+            assert( (GET_CXY( ppm_ppn2base( pt2_ppn ) ) == gpt_cxy ), __FUNCTION__,
+            "PT2 and PT1 must be in the same cluster\n");
+
+            // get current PTE2 attributes
+            old_attr = hal_remote_lw( XPTR( gpt_cxy , &pt2[2*ix2] ) );
+
+            // only MAPPED PTEs are modified
+            if( old_attr & TSAR_MMU_MAPPED )
+            {
+                if( (set_cow != 0) && (old_attr & TSAR_MMU_WRITABLE) )
+                {
+                    new_attr = (old_attr | TSAR_MMU_COW) & (~TSAR_MMU_WRITABLE);
+                    hal_remote_sw( XPTR( gpt_cxy , &pt2[2*ix2] ) , new_attr );
+                }
+                if( (set_cow == 0) && (old_attr & TSAR_MMU_COW ) )
+                {
+                    new_attr = (old_attr | TSAR_MMU_WRITABLE) & (~TSAR_MMU_COW);
+                    hal_remote_sw( XPTR( gpt_cxy , &pt2[2*ix2] ) , new_attr );
+                }
+            }  // end if PTE2 mapped
+        }  // end if PTE1 mapped
+    }  // end loop on pages
+
+}  // end hal_gpt_flip_cow()
+
+//////////////////////////////////////////
+void hal_gpt_update_pte( xptr_t    gpt_xp,
+                         vpn_t     vpn,
+                         uint32_t  attr,     // generic GPT attributes
+                         ppn_t     ppn )
+{
+    uint32_t * pt1;        // PT1 base addres
+    uint32_t   pte1;       // PT1 entry value
+
+    ppn_t      pt2_ppn;    // PPN of PT2
+    uint32_t * pt2;        // PT2 base address
+
+    uint32_t   ix1;        // index in PT1
+    uint32_t   ix2;        // index in PT2
+
+    uint32_t   tsar_attr;  // PTE attributes for TSAR MMU
+
+    // check attr argument MAPPED and SMALL
+    if( (attr & GPT_MAPPED) == 0 ) return;
+    if( (attr & GPT_SMALL ) == 0 ) return;
+
+    // get cluster and local pointer on remote GPT
+    cxy_t   gpt_cxy = GET_CXY( gpt_xp );
+    gpt_t * gpt_ptr = (gpt_t *)GET_PTR( gpt_xp );
+
+    // compute indexes in PT1 and PT2
+    ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
+    ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
+
+    // get PT1 base
+    pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
+
+    // compute tsar_attr from generic attributes
+    tsar_attr = gpt2tsar( attr );
+
+    // get PTE1 value
+    pte1 = hal_remote_lw( XPTR( gpt_cxy , &pt1[ix1] ) );
+
+    if( (pte1 & TSAR_MMU_MAPPED) == 0 ) return;
+    if( (pte1 & TSAR_MMU_SMALL ) == 0 ) return;
+
+    // get PT2 base from PTE1
+    pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
+    pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
+    // reset PTE2
+    hal_remote_sw( XPTR( gpt_cxy, &pt2[2 * ix2] ) , 0 );
+    hal_fence();
+
+    // set PTE2 in this order
+    hal_remote_sw( XPTR( gpt_cxy, &pt2[2 * ix2 + 1] ) , ppn );
+    hal_fence();
+    hal_remote_sw( XPTR( gpt_cxy, &pt2[2 * ix2] ) , tsar_attr );
+    hal_fence();
+
+}  // end hal_gpt_update_pte()
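The three fenced stores at the end of hal_gpt_update_pte() encode the update protocol for a live PTE2: invalidate, install the new PPN, then publish the new attributes. A condensed sketch of the invariant, with a hypothetical concurrent reader modeled on hal_gpt_pte_is_mapped(); assuming hal_remote_sw()/hal_fence() order the stores as written, the reader can never pair a MAPPED attribute word with a stale PPN:

    // writer (as in hal_gpt_update_pte above)
    hal_remote_sw( XPTR( gpt_cxy , &pt2[2*ix2] )     , 0 );         // 1. invalidate entry
    hal_fence();
    hal_remote_sw( XPTR( gpt_cxy , &pt2[2*ix2 + 1] ) , ppn );       // 2. install new PPN
    hal_fence();
    hal_remote_sw( XPTR( gpt_cxy , &pt2[2*ix2] )     , tsar_attr ); // 3. publish attributes
    hal_fence();

    // hypothetical reader: tests the attribute word before using the PPN,
    // so it observes either "unmapped" (between steps 1 and 3) or the
    // complete new mapping (after step 3), never a mixed entry
    uint32_t attr = pt2[2*ix2];
    if( attr & TSAR_MMU_MAPPED )
    {
        ppn_t cur_ppn = TSAR_MMU_PPN_FROM_PTE2( pt2[2*ix2 + 1] );
        // cur_ppn and attr are consistent here
    }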