--- a/src/sh4/mmu.c Sat Jan 03 03:30:26 2009 +0000
+++ b/src/sh4/mmu.c Mon Jan 05 04:16:28 2009 +0000
 static void mmu_set_tlb_asid( uint32_t asid );
 static void mmu_set_storequeue_protected( int protected );
 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
-static gboolean mmu_utlb_unmap_pages( gboolean unmap_user, sh4addr_t start_addr, int npages );
+static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
+static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
 static void mmu_utlb_1k_init();
 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
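For orientation (this note and the sketch below are editorial, not part of the patch): all of these helpers rewrite two flat first-level lookup tables, one for the privileged address-space view and one for the user view. The shape assumed here is inferred from the start_addr >> 12 indexing used later in the patch; the names match mmu.c, but the typedef and sizes are assumptions:

    /* Sketch of the assumed layout: 4GB of address space in 4K pages gives
     * 2^20 first-level slots per view.  Each slot holds a region handler;
     * TLB-managed slots hold either a page's handler, the "miss" handler,
     * or the "multi-hit" handler. */
    typedef struct mem_region_fn *mem_region_fn_t;           /* assumed typedef */

    extern mem_region_fn_t sh4_address_space[1 << 20];       /* privileged view */
    extern mem_region_fn_t sh4_user_address_space[1 << 20];  /* user-mode view  */

This is also why unmap/remap now take separate priv/user flags: with SV=1 the privileged view is ASID-independent, so an ASID switch only needs to touch the user table.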
@@ -420,13 +421,13 @@
     /* Scan for pages that need to be remapped */
     if( IS_SV_ENABLED() ) {
-        // FIXME: Priv pages don't change - only user pages are mapped in/out
         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
             if( mmu_utlb[i].flags & TLB_VALID ) {
                 if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
                     if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
-                        mmu_utlb_unmap_pages( TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
-                                get_tlb_size_pages(mmu_utlb[i].flags) );
+                        if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
+                                get_tlb_size_pages(mmu_utlb[i].flags) ) )
+                            mmu_utlb_remap_pages( FALSE, TRUE, i );
                     } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
                         mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
                                 mmu_utlb[i].vpn&mmu_utlb[i].mask,
             if( mmu_utlb[i].flags & TLB_VALID ) {
                 if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
                     if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
-                        mmu_utlb_unmap_pages( TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
-                                get_tlb_size_pages(mmu_utlb[i].flags) );
+                        if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
+                                get_tlb_size_pages(mmu_utlb[i].flags) ) )
+                            mmu_utlb_remap_pages( TRUE, TRUE, i );
                     } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
                         mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
                                 mmu_utlb[i].vpn&mmu_utlb[i].mask,
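Read as a single per-entry decision, the two loop bodies above do the following. The helper below is a hypothetical restatement for clarity only (it does not exist in the patch), and it assumes mmu_asid still holds the outgoing ASID at the point of the scan:

    static void asid_switch_entry( int i, uint32_t old_asid, uint32_t new_asid, gboolean sv )
    {
        sh4addr_t start;
        int npages;

        if( !(mmu_utlb[i].flags & TLB_VALID) || (mmu_utlb[i].flags & TLB_SHARE) )
            return;                     /* shared/invalid entries ignore the ASID */

        start = mmu_utlb[i].vpn & mmu_utlb[i].mask;
        npages = get_tlb_size_pages( mmu_utlb[i].flags );

        if( mmu_utlb[i].asid == old_asid ) {
            /* Leaving this ASID: drop the mapping.  If it had collapsed into a
             * multi-hit region, rebuild the surviving overlapping entries. */
            if( !mmu_utlb_unmap_pages( !sv, TRUE, start, npages ) )
                mmu_utlb_remap_pages( !sv, TRUE, i );
        } else if( mmu_utlb[i].asid == new_asid ) {
            /* Entering this ASID: bring the mapping (back) in.  With SV=1 the
             * privileged view never changed, so only the user view is mapped. */
            mmu_utlb_map_pages( sv ? NULL : &mmu_utlb_pages[i].fn,
                                mmu_utlb_pages[i].user_fn, start, npages );
        }
    }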
@@ -533,19 +535,31 @@
-    if( user_page == NULL ) {
-        /* Privileged mapping only */
-        for( i=0; i<npages; i++ ) {
-            if( *ptr == &mem_region_tlb_miss ) {
-                *ptr++ = priv_page;
-                mapping_ok = FALSE;
-                *ptr++ = &mem_region_tlb_multihit;
+    if( priv_page != NULL ) {
+        if( user_page != NULL ) {
+            for( i=0; i<npages; i++ ) {
+                if( *ptr == &mem_region_tlb_miss ) {
+                    *ptr++ = priv_page;
+                    *uptr++ = user_page;
+                    mapping_ok = FALSE;
+                    *ptr++ = &mem_region_tlb_multihit;
+                    *uptr++ = &mem_region_tlb_multihit;
+            /* Privileged mapping only */
+            for( i=0; i<npages; i++ ) {
+                if( *ptr == &mem_region_tlb_miss ) {
+                    *ptr++ = priv_page;
+                    mapping_ok = FALSE;
+                    *ptr++ = &mem_region_tlb_multihit;
-    } else if( priv_page == NULL ) {
-        /* User mapping only (eg ASID change remap) */
+    } else if( user_page != NULL ) {
+        /* User mapping only (eg ASID change remap w/ SV=1) */
         for( i=0; i<npages; i++ ) {
             if( *uptr == &mem_region_tlb_miss ) {
                 *uptr++ = user_page;
@@ -554,28 +568,56 @@
                 *uptr++ = &mem_region_tlb_multihit;
-        for( i=0; i<npages; i++ ) {
-            if( *ptr == &mem_region_tlb_miss ) {
-                *ptr++ = priv_page;
-                *uptr++ = user_page;
-                mapping_ok = FALSE;
-                *ptr++ = &mem_region_tlb_multihit;
-                *uptr++ = &mem_region_tlb_multihit;
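The restructured mmu_utlb_map_pages above applies one rule per 4K slot in whichever view it was asked to touch; only the combination of non-NULL arguments changes between the branches. A self-contained restatement of that per-slot rule (illustrative only; the two handler objects stand in for the real ones in mmu.c):

    typedef struct mem_region_fn *mem_region_fn_t;     /* assumed typedef */
    extern struct mem_region_fn mem_region_tlb_miss, mem_region_tlb_multihit;

    /* A slot is only claimed if it currently holds the miss handler.  A second
     * claimant turns the slot into the multi-hit exception region, and the
     * caller learns the mapping was not clean (mapping_ok = FALSE in the patch). */
    static mem_region_fn_t map_slot( mem_region_fn_t current, mem_region_fn_t page,
                                     gboolean *mapping_ok )
    {
        if( current == &mem_region_tlb_miss )
            return page;
        *mapping_ok = FALSE;
        return &mem_region_tlb_multihit;
    }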
+ * Remap any pages within the region covered by entryNo, but not including
+ * entryNo itself. This is used to reestablish pages that were previously
+ * covered by a multi-hit exception region when one of the pages is removed.
+static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
+    int mask = mmu_utlb[entryNo].mask;
+    uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
+    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
+        if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
+            /* Overlapping region */
+            mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
+            mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
+            uint32_t start_addr;
+            if( mmu_utlb[i].mask >= mask ) {
+                /* entry is no larger than the area we're replacing - map completely */
+                start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
+                npages = get_tlb_size_pages( mmu_utlb[i].flags );
+                /* Otherwise map subset - region covered by removed page */
+                start_addr = remap_addr;
+                npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
+            if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
+                mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
+            } else if( IS_SV_ENABLED() ) {
+                mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
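One detail worth spelling out: SH4-style TLB masks have more bits set for smaller pages, so mmu_utlb[i].mask >= mask reads as "entry i's page is no larger than the removed entry's". A worked example with assumed values:

    #include <stdint.h>

    static void remap_size_example( void )
    {
        uint32_t mask_removed = 0xFFFF0000;   /* entryNo: 64K page at 0x0C010000 */
        uint32_t mask_other   = 0xFFFFF000;   /* entry i:   4K page at 0x0C012000 */

        if( mask_other >= mask_removed ) {
            /* 0xFFFFF000 >= 0xFFFF0000: entry i is the smaller page, fits
             * entirely inside the vacated 64K region, and is remapped in full:
             *   start_addr = 0x0C012000, npages = 1 */
        } else {
            /* Had entry i been a 1M page instead, only the vacated 64K region
             * would be rewritten:
             *   start_addr = remap_addr, npages = 16  (64K / 4K pages) */
        }
    }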
 * Remove a previous TLB mapping (replacing them with the TLB miss region).
 * @return FALSE if any pages were previously mapped to the TLB multihit page,
 * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
-static gboolean mmu_utlb_unmap_pages( gboolean unmap_user, sh4addr_t start_addr, int npages )
+static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
@@ -599,8 +641,10 @@
             if( ent->subpages[idx] == &mem_region_tlb_multihit ) {
                 unmapping_ok = FALSE;
-            ent->subpages[idx] = &mem_region_tlb_miss;
-            ent->user_subpages[idx] = &mem_region_tlb_miss;
+            ent->subpages[idx] = &mem_region_tlb_miss;
+            ent->user_subpages[idx] = &mem_region_tlb_miss;
         /* If all 4 subpages have the same content, merge them together and
          * release the 1K entry
@@ -619,24 +663,35 @@
-    if( !unmap_user ) {
-        /* Privileged (un)mapping only */
+    if( unmap_priv ) {
+        if( unmap_user ) {
+            for( i=0; i<npages; i++ ) {
+                if( *ptr == &mem_region_tlb_multihit ) {
+                    unmapping_ok = FALSE;
+                *ptr++ = &mem_region_tlb_miss;
+                *uptr++ = &mem_region_tlb_miss;
+            /* Privileged (un)mapping only */
+            for( i=0; i<npages; i++ ) {
+                if( *ptr == &mem_region_tlb_multihit ) {
+                    unmapping_ok = FALSE;
+                *ptr++ = &mem_region_tlb_miss;
+    } else if( unmap_user ) {
+        /* User (un)mapping only */
         for( i=0; i<npages; i++ ) {
-            if( *ptr == &mem_region_tlb_multihit ) {
+            if( *uptr == &mem_region_tlb_multihit ) {
                 unmapping_ok = FALSE;
-            *ptr++ = &mem_region_tlb_miss;
-        for( i=0; i<npages; i++ ) {
-            if( *ptr == &mem_region_tlb_multihit ) {
-                unmapping_ok = FALSE;
-            *ptr++ = &mem_region_tlb_miss;
             *uptr++ = &mem_region_tlb_miss;
     return unmapping_ok;
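The 1K-entry handling above is only partially visible in this hunk. As a rough, hypothetical sketch of the merge check described by the "If all 4 subpages have the same content" comment, assuming a utlb_1k_entry that carries four 1K sub-slots per view (the field names come from the lines shown; the release step is a placeholder, presumably paired with mmu_utlb_1k_alloc elsewhere in the file):

    /* Sketch only - not the patch's code. */
    static gboolean try_collapse_1k_entry( struct utlb_1k_entry *ent,
                                           mem_region_fn_t *slot, mem_region_fn_t *user_slot )
    {
        int i;
        for( i = 1; i < 4; i++ ) {
            if( ent->subpages[i] != ent->subpages[0] ||
                ent->user_subpages[i] != ent->user_subpages[0] )
                return FALSE;             /* sub-slots still differ - keep the 1K entry */
        }
        *slot = ent->subpages[0];         /* collapse back to a single 4K mapping */
        *user_slot = ent->user_subpages[0];
        /* ...release ent here (hypothetical counterpart to mmu_utlb_1k_alloc) */
        return TRUE;
    }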
@@ -697,17 +752,10 @@
         return; // Not mapped
-    gboolean clean_unmap = mmu_utlb_unmap_pages( unmap_user, start_addr, npages );
+    gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
     if( !clean_unmap ) {
-        /* If we ran into a multi-hit, we now need to rescan the UTLB for the other entries
-         * and remap them */
-        for( j=0; j<UTLB_ENTRY_COUNT; j++ ) {
-            uint32_t mask = MIN(mmu_utlb[j].mask, ent->mask);
-            if( j != entry && (start_addr & mask) == (mmu_utlb[j].vpn & mask) ) {
+        mmu_utlb_remap_pages( TRUE, unmap_user, entry );