lxdream.org :: lxdream/src/sh4/mmu.c
filename     src/sh4/mmu.c
changeset    951:63483914846f
prev         948:545c85cc56f1
next         952:18e579840923
author       nkeynes
date         Wed Jan 07 05:45:15 2009 +0000 (13 years ago)
branch       lxdream-mem
permissions  -rw-r--r--
last change  Tidy up exceptions+resets
             Implement manual reset on general exception when SR.BL == 1
     1 /**
     2  * $Id$
     3  *
     4  * SH4 MMU implementation based on address space page maps. This module
     5  * is responsible for all address decoding functions. 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    19 #define MODULE sh4_module
    21 #include <stdio.h>
    22 #include <assert.h>
    23 #include "sh4/sh4mmio.h"
    24 #include "sh4/sh4core.h"
    25 #include "sh4/sh4trans.h"
    26 #include "dreamcast.h"
    27 #include "mem.h"
    28 #include "mmu.h"
    30 #define RAISE_TLB_ERROR(code, vpn) sh4_raise_tlb_exception(code, vpn)
    31 #define RAISE_MEM_ERROR(code, vpn) \
    32     MMIO_WRITE(MMU, TEA, vpn); \
    33     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    34     sh4_raise_exception(code);
    35 #define RAISE_TLB_MULTIHIT_ERROR(vpn) sh4_raise_tlb_multihit(vpn)
    37 /* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
    38 #define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
    40 /* Primary address space (used directly by SH4 cores) */
    41 mem_region_fn_t *sh4_address_space;
    42 mem_region_fn_t *sh4_user_address_space;
    44 /* Accessed from the UTLB accessor methods */
    45 uint32_t mmu_urc;
    46 uint32_t mmu_urb;
    48 /* Module globals */
    49 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
    50 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
    51 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
    52 static uint32_t mmu_lrui;
    53 static uint32_t mmu_asid; // current asid
    54 static struct utlb_default_regions *mmu_user_storequeue_regions;
    56 /* Structures for 1K page handling */
    57 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
    58 static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
    59 static int mmu_utlb_1k_free_index;
    62 /* Function prototypes */
    63 static void mmu_invalidate_tlb();
    64 static void mmu_utlb_register_all();
    65 static void mmu_utlb_remove_entry(int);
    66 static void mmu_utlb_insert_entry(int);
    67 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    68 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    69 static void mmu_set_tlb_enabled( int tlb_on );
    70 static void mmu_set_tlb_asid( uint32_t asid );
    71 static void mmu_set_storequeue_protected( int protected, int tlb_on );
    72 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
    73 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
    74 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
    75 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
    76 static void mmu_utlb_1k_init();
    77 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
    78 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
    80 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
    81 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
    82 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
    83 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
    84 static uint32_t get_tlb_size_mask( uint32_t flags );
    85 static uint32_t get_tlb_size_pages( uint32_t flags );
    87 #define DEFAULT_REGIONS 0
    88 #define DEFAULT_STOREQUEUE_REGIONS 1
    89 #define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
    91 static struct utlb_default_regions mmu_default_regions[3] = {
    92         { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
    93         { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
    94         { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
    96 #define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
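        /* The three entries above supply the fallback handlers used when a translated
         * access cannot be resolved: miss, protection-violation and multi-hit regions
         * for ordinary memory [0], the store queue [1], and the store queue with SQMD
         * restricting it to privileged mode [2]. IS_STOREQUEUE_PROTECTED() simply tests
         * which of the two store-queue sets is currently active for user mode. */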
    98 /*********************** Module public functions ****************************/
   100 /**
   101  * Allocate memory for the address space maps, and initialize them according
   102  * to the default (reset) values. (TLB is disabled by default)
   103  */
   105 void MMU_init()
   106 {
   107     sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   108     sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   109     mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   111     mmu_set_tlb_enabled(0);
   112     mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
   113     mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );                                
   115     /* Setup P4 tlb/cache access regions */
   116     mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   117     mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
   118     mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
   119     mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
   120     mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
   121     mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
   122     mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
   123     mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
   124     mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
   125     mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
   126     mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
   128     /* Setup P4 control region */
   129     mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
   130     mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
   131     mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
   132     mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
   133     mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
   134     mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
   135     mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
   136     mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
   137     mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
   138     mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
   139     mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
   140     mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
   141     mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
   143     register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
   144     mmu_utlb_1k_init();
   146     /* Ensure the code regions are executable */
   147     mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
   148     mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
   149 }
   151 void MMU_reset()
   152 {
   153     mmio_region_MMU_write( CCR, 0 );
   154     mmio_region_MMU_write( MMUCR, 0 );
   155 }
   157 void MMU_save_state( FILE *f )
   158 {
   159     fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
   160     fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
   161     fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
   162     fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
   163     fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
   164     fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
   165 }
   167 int MMU_load_state( FILE *f )
   168 {
   169     if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
   170         return 1;
   171     }
   172     if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
   173         return 1;
   174     }
   175     if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
   176         return 1;
   177     }
    178     if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
   179         return 1;
   180     }
   181     if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
   182         return 1;
   183     }
   184     if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
   185         return 1;
   186     }
   188     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
   189     mmu_set_tlb_enabled(mmucr&MMUCR_AT);
   190     mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
   191     return 0;
   192 }
   194 /**
   195  * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
   196  * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
   197  */
   198 void MMU_ldtlb()
   199 {
   200     mmu_urc %= mmu_urb;
   201     if( mmu_utlb[mmu_urc].flags & TLB_VALID )
   202         mmu_utlb_remove_entry( mmu_urc );
   203     mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
   204     mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
   205     mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    206     mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
   207     mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
   208     mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
   209     if( mmu_utlb[mmu_urc].flags & TLB_VALID )
   210         mmu_utlb_insert_entry( mmu_urc );
   211 }
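        /**
         * MMU MMIO register reads. MMUCR is special-cased: URC, URB and LRUI are kept
         * in module globals, so they are folded back into the register value on read.
         */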
   214 MMIO_REGION_READ_FN( MMU, reg )
   215 {
   216     reg &= 0xFFF;
   217     switch( reg ) {
   218     case MMUCR:
   219         mmu_urc %= mmu_urb;
   220         return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
   221     default:
   222         return MMIO_READ( MMU, reg );
   223     }
   224 }
   226 MMIO_REGION_WRITE_FN( MMU, reg, val )
   227 {
   228     uint32_t tmp;
   229     reg &= 0xFFF;
   230     switch(reg) {
   231     case SH4VER:
   232         return;
   233     case PTEH:
   234         val &= 0xFFFFFCFF;
   235         if( (val & 0xFF) != mmu_asid ) {
   236             mmu_set_tlb_asid( val&0xFF );
   237             sh4_icache.page_vma = -1; // invalidate icache as asid has changed
   238         }
   239         break;
   240     case PTEL:
   241         val &= 0x1FFFFDFF;
   242         break;
   243     case PTEA:
   244         val &= 0x0000000F;
   245         break;
   246     case TRA:
   247         val &= 0x000003FC;
   248         break;
   249     case EXPEVT:
   250     case INTEVT:
   251         val &= 0x00000FFF;
   252         break;
   253     case MMUCR:
   254         if( val & MMUCR_TI ) {
   255             mmu_invalidate_tlb();
   256         }
   257         mmu_urc = (val >> 10) & 0x3F;
   258         mmu_urb = (val >> 18) & 0x3F;
   259         if( mmu_urb == 0 ) {
   260             mmu_urb = 0x40;
   261         }
   262         mmu_lrui = (val >> 26) & 0x3F;
   263         val &= 0x00000301;
   264         tmp = MMIO_READ( MMU, MMUCR );
   265         if( (val ^ tmp) & (MMUCR_SQMD) ) {
   266             mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
   267         }
   268         if( (val ^ tmp) & (MMUCR_AT) ) {
   269             // AT flag has changed state - flush the xlt cache as all bets
   270             // are off now. We also need to force an immediate exit from the
   271             // current block
   272             mmu_set_tlb_enabled( val & MMUCR_AT );
   273             MMIO_WRITE( MMU, MMUCR, val );
   274             sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
   275             xlat_flush_cache(); // If we're not running, flush the cache anyway
   276         }
   277         break;
   278     case CCR:
   279         CCN_set_cache_control( val );
   280         val &= 0x81A7;
   281         break;
   282     case MMUUNK1:
   283         /* Note that if the high bit is set, this appears to reset the machine.
   284          * Not emulating this behaviour yet until we know why...
   285          */
   286         val &= 0x00010007;
   287         break;
   288     case QACR0:
   289     case QACR1:
   290         val &= 0x0000001C;
   291         break;
   292     case PMCR1:
   293         PMM_write_control(0, val);
   294         val &= 0x0000C13F;
   295         break;
   296     case PMCR2:
   297         PMM_write_control(1, val);
   298         val &= 0x0000C13F;
   299         break;
   300     default:
   301         break;
   302     }
   303     MMIO_WRITE( MMU, reg, val );
   304 }
   306 /********************** 1K Page handling ***********************/
   307 /* Since we use 4K pages as our native page size, 1K pages need a bit of extra
   308  * effort to manage - we justify this on the basis that most programs won't
   309  * actually use 1K pages, so we may as well optimize for the common case.
   310  * 
   311  * Implementation uses an intermediate page entry (the utlb_1k_entry) that
   312  * redirects requests to the 'real' page entry. These are allocated on an
   313  * as-needed basis, and returned to the pool when all subpages are empty.
   314  */ 
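        /* Rough illustration (not part of the module, and assuming mem_region_fn
         * exposes a read_long field): an access dispatches through the page map, so a
         * 1K entry just adds one extra vtable hop before the real handler:
         *
         *     mem_region_fn_t fn = sh4_address_space[addr >> 12];
         *     int32_t val = fn->read_long( addr );  // fn may be a utlb_1k_entry vtable
         */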
   315 static void mmu_utlb_1k_init()
   316 {
   317     int i;
   318     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   319         mmu_utlb_1k_free_list[i] = i;
   320         mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
   321     }
   322     mmu_utlb_1k_free_index = 0;
   323 }
   325 static struct utlb_1k_entry *mmu_utlb_1k_alloc()
   326 {
   327     assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
   328     struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_index++];
   329     return entry;
   330 }    
   332 static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
   333 {
   334     unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
   335     assert( entryNo < UTLB_ENTRY_COUNT );
   336     assert( mmu_utlb_1k_free_index > 0 );
   337     mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
   338 }
   341 /********************** Address space maintenance *************************/
   343 /**
   344  * MMU accessor functions just increment URC - fixup here if necessary
   345  */
   346 static inline void mmu_urc_fixup()
   347 {
   348    mmu_urc %= mmu_urb; 
   349 }
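        /**
         * Point every 4K page-map slot in [start,end) of the privileged address space
         * at the given region function table.
         */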
   351 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   352 {
   353     int count = (end - start) >> 12;
   354     mem_region_fn_t *ptr = &sh4_address_space[start>>12];
   355     while( count-- > 0 ) {
   356         *ptr++ = fn;
   357     }
   358 }
   359 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   360 {
   361     int count = (end - start) >> 12;
   362     mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
   363     while( count-- > 0 ) {
   364         *ptr++ = fn;
   365     }
   366 }
   368 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
   369 {
   370     int i;
   371     if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
   372         /* TLB on */
   373         sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
   374         sh4_address_space[(page|0xA0000000)>>12] = fn;
   375         /* Scan UTLB and update any direct-referencing entries */
   376     } else {
   377         /* Direct map to U0, P0, P1, P2, P3 */
   378         for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
   379             sh4_address_space[(page|i)>>12] = fn;
   380         }
   381         for( i=0; i < 0x80000000; i+= 0x20000000 ) {
   382             sh4_user_address_space[(page|i)>>12] = fn;
   383         }
   384     }
   385 }
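        /**
         * Rebuild the address space maps when MMUCR.AT changes. With the TLB enabled,
         * the translatable regions default to the TLB-miss handlers and all valid UTLB
         * entries are re-registered; with it disabled, U0/P0-P3 map straight through
         * to the external address space.
         */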
   387 static void mmu_set_tlb_enabled( int tlb_on )
   388 {
   389     mem_region_fn_t *ptr, *uptr;
   390     int i;
   392     /* Reset the storequeue area */
   394     if( tlb_on ) {
   395         mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   396         mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
   397         mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   399         /* Default SQ prefetch goes to TLB miss (?) */
   400         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
   401         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   402         mmu_utlb_register_all();
   403     } else {
   404         for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   405             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   406         }
   407         for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   408             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   409         }
   411         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   412         if( IS_STOREQUEUE_PROTECTED() ) {
   413             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
   414         } else {
   415             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   416         }
   417     }
   419 }
   421 /**
   422  * Flip the SQMD switch - this is rather expensive, so will need to be changed if
   423  * anything expects to do this frequently.
   424  */
   425 static void mmu_set_storequeue_protected( int protected, int tlb_on ) 
   426 {
   427     mem_region_fn_t nontlb_region;
   428     int i;
   430     if( protected ) {
   431         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
   432         nontlb_region = &p4_region_storequeue_sqmd;
   433     } else {
   434         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   435         nontlb_region = &p4_region_storequeue; 
   436     }
   438     if( tlb_on ) {
   439         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   440         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   441             if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
   442                 mmu_utlb_insert_entry(i);
   443             }
   444         }
   445     } else {
   446         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region ); 
   447     }
   449 }
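        /**
         * Switch the active ASID. Non-shared entries owned by the old ASID are
         * unmapped and entries matching the new ASID are mapped in; with SV enabled
         * only the user map is affected, otherwise both maps are updated.
         */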
   451 static void mmu_set_tlb_asid( uint32_t asid )
   452 {
   453     /* Scan for pages that need to be remapped */
   454     int i;
   455     if( IS_SV_ENABLED() ) {
   456         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   457             if( mmu_utlb[i].flags & TLB_VALID ) {
   458                 if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
   459                     if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
   460                         if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   461                                 get_tlb_size_pages(mmu_utlb[i].flags) ) )
   462                             mmu_utlb_remap_pages( FALSE, TRUE, i );
   463                     } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
   464                         mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn, 
   465                                 mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   466                                 get_tlb_size_pages(mmu_utlb[i].flags) );  
   467                     }
   468                 }
   469             }
   470         }
   471     } else {
   472         // Remap both Priv+user pages
   473         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   474             if( mmu_utlb[i].flags & TLB_VALID ) {
   475                 if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
   476                     if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
   477                         if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   478                                 get_tlb_size_pages(mmu_utlb[i].flags) ) )
   479                             mmu_utlb_remap_pages( TRUE, TRUE, i );
   480                     } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
   481                         mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn, 
   482                                 mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   483                                 get_tlb_size_pages(mmu_utlb[i].flags) );  
   484                     }
   485                 }
   486             }
   487         }
   488     }
   490     mmu_asid = asid;
   491 }
   493 static uint32_t get_tlb_size_mask( uint32_t flags )
   494 {
   495     switch( flags & TLB_SIZE_MASK ) {
   496     case TLB_SIZE_1K: return MASK_1K;
   497     case TLB_SIZE_4K: return MASK_4K;
   498     case TLB_SIZE_64K: return MASK_64K;
   499     case TLB_SIZE_1M: return MASK_1M;
   500     default: return 0; /* Unreachable */
   501     }
   502 }
   503 static uint32_t get_tlb_size_pages( uint32_t flags )
   504 {
   505     switch( flags & TLB_SIZE_MASK ) {
   506     case TLB_SIZE_1K: return 0;
   507     case TLB_SIZE_4K: return 1;
   508     case TLB_SIZE_64K: return 16;
   509     case TLB_SIZE_1M: return 256;
   510     default: return 0; /* Unreachable */
   511     }
   512 }
   514 /**
   515  * Add a new TLB entry mapping to the address space table. If any of the pages
   516  * are already mapped, they are mapped to the TLB multi-hit page instead.
   517  * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
   518  */ 
   519 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
   520 {
   521     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   522     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   523     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   524     struct utlb_default_regions *userdefs = privdefs;    
   526     gboolean mapping_ok = TRUE;
   527     int i;
   529     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   530         /* Storequeue mapping */
   531         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   532         userdefs = mmu_user_storequeue_regions;
   533     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   534         user_page = NULL; /* No user access to P3 region */
   535     } else if( start_addr >= 0x80000000 ) {
   536         return TRUE; // No mapping - legal but meaningless
   537     }
   539     if( npages == 0 ) {
   540         struct utlb_1k_entry *ent;
   541         int i, idx = (start_addr >> 10) & 0x03;
   542         if( IS_1K_PAGE_ENTRY(*ptr) ) {
   543             ent = (struct utlb_1k_entry *)*ptr;
   544         } else {
   545             ent = mmu_utlb_1k_alloc();
   546             /* New 1K struct - init to previous contents of region */
   547             for( i=0; i<4; i++ ) {
   548                 ent->subpages[i] = *ptr;
   549                 ent->user_subpages[i] = *uptr;
   550             }
   551             *ptr = &ent->fn;
   552             *uptr = &ent->user_fn;
   553         }
   555         if( priv_page != NULL ) {
   556             if( ent->subpages[idx] == privdefs->tlb_miss ) {
   557                 ent->subpages[idx] = priv_page;
   558             } else {
   559                 mapping_ok = FALSE;
   560                 ent->subpages[idx] = privdefs->tlb_multihit;
   561             }
   562         }
   563         if( user_page != NULL ) {
   564             if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
   565                 ent->user_subpages[idx] = user_page;
   566             } else {
   567                 mapping_ok = FALSE;
   568                 ent->user_subpages[idx] = userdefs->tlb_multihit;
   569             }
   570         }
   572     } else {
   573         if( priv_page != NULL ) {
   574             /* Privileged mapping only */
   575             for( i=0; i<npages; i++ ) {
   576                 if( *ptr == privdefs->tlb_miss ) {
   577                     *ptr++ = priv_page;
   578                 } else {
   579                     mapping_ok = FALSE;
   580                     *ptr++ = privdefs->tlb_multihit;
   581                 }
   582             }
   583         }
   584         if( user_page != NULL ) {
   585             /* User mapping only (eg ASID change remap w/ SV=1) */
   586             for( i=0; i<npages; i++ ) {
   587                 if( *uptr == userdefs->tlb_miss ) {
   588                     *uptr++ = user_page;
   589                 } else {
   590                     mapping_ok = FALSE;
   591                     *uptr++ = userdefs->tlb_multihit;
   592                 }
   593             }        
   594         }
   595     }
   597     return mapping_ok;
   598 }
   600 /**
   601  * Remap any pages within the region covered by entryNo, but not including 
   602  * entryNo itself. This is used to reestablish pages that were previously
   603  * covered by a multi-hit exception region when one of the pages is removed.
   604  */
   605 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
   606 {
   607     int mask = mmu_utlb[entryNo].mask;
   608     uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
   609     int i;
   611     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   612         if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
   613             /* Overlapping region */
   614             mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
    615             mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
   616             uint32_t start_addr;
   617             int npages;
   619             if( mmu_utlb[i].mask >= mask ) {
   620                 /* entry is no larger than the area we're replacing - map completely */
   621                 start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
   622                 npages = get_tlb_size_pages( mmu_utlb[i].flags );
   623             } else {
   624                 /* Otherwise map subset - region covered by removed page */
   625                 start_addr = remap_addr;
   626                 npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
   627             }
   629             if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) { 
   630                 mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
   631             } else if( IS_SV_ENABLED() ) {
   632                 mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
   633             }
   635         }
   636     }
   637 }
   639 /**
    640  * Remove a previous TLB mapping (replacing the pages with the TLB miss region).
   641  * @return FALSE if any pages were previously mapped to the TLB multihit page, 
   642  * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
   643  */
   644 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
   645 {
   646     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   647     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   648     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   649     struct utlb_default_regions *userdefs = privdefs;
   651     gboolean unmapping_ok = TRUE;
   652     int i;
   654     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   655         /* Storequeue mapping */
   656         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   657         userdefs = mmu_user_storequeue_regions;
   658     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   659         unmap_user = FALSE;
   660     } else if( start_addr >= 0x80000000 ) {
   661         return TRUE; // No mapping - legal but meaningless
   662     }
   664     if( npages == 0 ) { // 1K page
   665         assert( IS_1K_PAGE_ENTRY( *ptr ) );
   666         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
   667         int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
   668         if( ent->subpages[idx] == privdefs->tlb_multihit ) {
   669             unmapping_ok = FALSE;
   670         }
   671         if( unmap_priv )
   672             ent->subpages[idx] = privdefs->tlb_miss;
   673         if( unmap_user )
   674             ent->user_subpages[idx] = userdefs->tlb_miss;
   676         /* If all 4 subpages have the same content, merge them together and
   677          * release the 1K entry
   678          */
   679         mem_region_fn_t priv_page = ent->subpages[0];
   680         mem_region_fn_t user_page = ent->user_subpages[0];
   681         for( i=1; i<4; i++ ) {
   682             if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
   683                 mergeable = 0;
   684                 break;
   685             }
   686         }
   687         if( mergeable ) {
   688             mmu_utlb_1k_free(ent);
   689             *ptr = priv_page;
   690             *uptr = user_page;
   691         }
   692     } else {
   693         if( unmap_priv ) {
   694             /* Privileged (un)mapping */
   695             for( i=0; i<npages; i++ ) {
   696                 if( *ptr == privdefs->tlb_multihit ) {
   697                     unmapping_ok = FALSE;
   698                 }
   699                 *ptr++ = privdefs->tlb_miss;
   700             }
   701         }
   702         if( unmap_user ) {
   703             /* User (un)mapping */
   704             for( i=0; i<npages; i++ ) {
   705                 if( *uptr == userdefs->tlb_multihit ) {
   706                     unmapping_ok = FALSE;
   707                 }
   708                 *uptr++ = userdefs->tlb_miss;
   709             }            
   710         }
   711     }
   713     return unmapping_ok;
   714 }
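        /**
         * Add the given UTLB entry to the address space maps. Store-queue entries get
         * the store-queue vtable (only the prefetch goes through the TLB); for normal
         * pages the write handlers are selected from the protection and dirty bits so
         * that protection violations and initial page writes raise exceptions.
         */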
   716 static void mmu_utlb_insert_entry( int entry )
   717 {
   718     struct utlb_entry *ent = &mmu_utlb[entry];
   719     mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
   720     mem_region_fn_t upage;
   721     sh4addr_t start_addr = ent->vpn & ent->mask;
   722     int npages = get_tlb_size_pages(ent->flags);
   724     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   725         /* Store queue mappings are a bit different - normal access is fixed to
   726          * the store queue register block, and we only map prefetches through
   727          * the TLB 
   728          */
   729         mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
   731         if( (ent->flags & TLB_USERMODE) == 0 ) {
   732             upage = mmu_user_storequeue_regions->tlb_prot;
   733         } else if( IS_STOREQUEUE_PROTECTED() ) {
   734             upage = &p4_region_storequeue_sqmd;
   735         } else {
   736             upage = page;
   737         }
   739     }  else {
   741         if( (ent->flags & TLB_USERMODE) == 0 ) {
   742             upage = &mem_region_tlb_protected;
   743         } else {        
   744             upage = page;
   745         }
   747         if( (ent->flags & TLB_WRITABLE) == 0 ) {
   748             page->write_long = (mem_write_fn_t)tlb_protected_write;
   749             page->write_word = (mem_write_fn_t)tlb_protected_write;
   750             page->write_byte = (mem_write_fn_t)tlb_protected_write;
   751             page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
   752             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   753         } else if( (ent->flags & TLB_DIRTY) == 0 ) {
   754             page->write_long = (mem_write_fn_t)tlb_initial_write;
   755             page->write_word = (mem_write_fn_t)tlb_initial_write;
   756             page->write_byte = (mem_write_fn_t)tlb_initial_write;
   757             page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
   758             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   759         } else {
   760             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
   761         }
   762     }
   764     mmu_utlb_pages[entry].user_fn = upage;
   766     /* Is page visible? */
   767     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) { 
   768         mmu_utlb_map_pages( page, upage, start_addr, npages );
   769     } else if( IS_SV_ENABLED() ) {
   770         mmu_utlb_map_pages( page, NULL, start_addr, npages );
   771     }
   772 }
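        /**
         * Remove the given UTLB entry from the address space maps, remapping any
         * overlapping entries if some of its pages had collapsed to the multi-hit
         * region.
         */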
   774 static void mmu_utlb_remove_entry( int entry )
   775 {
   776     int i, j;
   777     struct utlb_entry *ent = &mmu_utlb[entry];
   778     sh4addr_t start_addr = ent->vpn&ent->mask;
   779     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   780     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   781     gboolean unmap_user;
   782     int npages = get_tlb_size_pages(ent->flags);
   784     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
   785         unmap_user = TRUE;
   786     } else if( IS_SV_ENABLED() ) {
   787         unmap_user = FALSE;
   788     } else {
   789         return; // Not mapped
   790     }
   792     gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
   794     if( !clean_unmap ) {
   795         mmu_utlb_remap_pages( TRUE, unmap_user, entry );
   796     }
   797 }
   799 static void mmu_utlb_register_all()
   800 {
   801     int i;
   802     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   803         if( mmu_utlb[i].flags & TLB_VALID ) 
   804             mmu_utlb_insert_entry( i );
   805     }
   806 }
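        /**
         * Invalidate the entire ITLB and UTLB (MMUCR.TI write): removes the address
         * space mappings for valid UTLB entries (when translation is enabled) and
         * clears the valid bits in both TLBs.
         */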
   808 static void mmu_invalidate_tlb()
   809 {
   810     int i;
   811     for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
   812         mmu_itlb[i].flags &= (~TLB_VALID);
   813     }
   814     if( IS_TLB_ENABLED() ) {
   815         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   816             if( mmu_utlb[i].flags & TLB_VALID ) {
   817                 mmu_utlb_remove_entry( i );
   818             }
   819         }
   820     }
   821     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   822         mmu_utlb[i].flags &= (~TLB_VALID);
   823     }
   824 }
   826 /******************************************************************************/
   827 /*                        MMU TLB address translation                         */
   828 /******************************************************************************/
   830 /**
   831  * Translate a 32-bit address into a UTLB entry number. Does not check for
   832  * page protection etc.
   833  * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
   834  */
   835 int mmu_utlb_entry_for_vpn( uint32_t vpn )
   836 {
   837     mem_region_fn_t fn = sh4_address_space[vpn>>12];
   838     if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   839         return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   840     } else if( fn == &mem_region_tlb_multihit ) {
   841         return -2;
   842     } else {
   843         return -1;
   844     }
   845 }
   848 /**
   849  * Perform the actual utlb lookup w/ asid matching.
    850  * Possible outcomes are:
   851  *   0..63 Single match - good, return entry found
   852  *   -1 No match - raise a tlb data miss exception
   853  *   -2 Multiple matches - raise a multi-hit exception (reset)
   854  * @param vpn virtual address to resolve
   855  * @return the resultant UTLB entry, or an error.
   856  */
   857 static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
   858 {
   859     int result = -1;
   860     unsigned int i;
   862     mmu_urc++;
   863     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   864         mmu_urc = 0;
   865     }
   867     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   868         if( (mmu_utlb[i].flags & TLB_VALID) &&
   869                 ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
   870                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   871             if( result != -1 ) {
   872                 return -2;
   873             }
   874             result = i;
   875         }
   876     }
   877     return result;
   878 }
   880 /**
   881  * Perform the actual utlb lookup matching on vpn only
    882  * Possible outcomes are:
   883  *   0..63 Single match - good, return entry found
   884  *   -1 No match - raise a tlb data miss exception
   885  *   -2 Multiple matches - raise a multi-hit exception (reset)
   886  * @param vpn virtual address to resolve
   887  * @return the resultant UTLB entry, or an error.
   888  */
   889 static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
   890 {
   891     int result = -1;
   892     unsigned int i;
   894     mmu_urc++;
   895     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   896         mmu_urc = 0;
   897     }
   899     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   900         if( (mmu_utlb[i].flags & TLB_VALID) &&
   901                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   902             if( result != -1 ) {
   903                 return -2;
   904             }
   905             result = i;
   906         }
   907     }
   909     return result;
   910 }
   912 /**
   913  * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
   914  * @return the number (0-3) of the replaced entry.
   915  */
   916 static int inline mmu_itlb_update_from_utlb( int entryNo )
   917 {
   918     int replace;
   919     /* Determine entry to replace based on lrui */
   920     if( (mmu_lrui & 0x38) == 0x38 ) {
   921         replace = 0;
   922         mmu_lrui = mmu_lrui & 0x07;
   923     } else if( (mmu_lrui & 0x26) == 0x06 ) {
   924         replace = 1;
   925         mmu_lrui = (mmu_lrui & 0x19) | 0x20;
   926     } else if( (mmu_lrui & 0x15) == 0x01 ) {
   927         replace = 2;
   928         mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
   929     } else { // Note - gets invalid entries too
   930         replace = 3;
   931         mmu_lrui = (mmu_lrui | 0x0B);
   932     }
   934     mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
   935     mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
   936     mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
   937     mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
   938     mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
   939     return replace;
   940 }
   942 /**
   943  * Perform the actual itlb lookup w/ asid protection
    944  * Possible outcomes are:
   945  *   0..63 Single match - good, return entry found
   946  *   -1 No match - raise a tlb data miss exception
   947  *   -2 Multiple matches - raise a multi-hit exception (reset)
   948  * @param vpn virtual address to resolve
   949  * @return the resultant ITLB entry, or an error.
   950  */
   951 static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
   952 {
   953     int result = -1;
   954     unsigned int i;
   956     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   957         if( (mmu_itlb[i].flags & TLB_VALID) &&
   958                 ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
   959                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   960             if( result != -1 ) {
   961                 return -2;
   962             }
   963             result = i;
   964         }
   965     }
   967     if( result == -1 ) {
   968         int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
   969         if( utlbEntry < 0 ) {
   970             return utlbEntry;
   971         } else {
   972             return mmu_itlb_update_from_utlb( utlbEntry );
   973         }
   974     }
   976     switch( result ) {
   977     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
   978     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
   979     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
   980     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   981     }
   983     return result;
   984 }
   986 /**
   987  * Perform the actual itlb lookup on vpn only
    988  * Possible outcomes are:
   989  *   0..63 Single match - good, return entry found
   990  *   -1 No match - raise a tlb data miss exception
   991  *   -2 Multiple matches - raise a multi-hit exception (reset)
   992  * @param vpn virtual address to resolve
   993  * @return the resultant ITLB entry, or an error.
   994  */
   995 static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
   996 {
   997     int result = -1;
   998     unsigned int i;
  1000     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1001         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1002                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1003             if( result != -1 ) {
  1004                 return -2;
   1005             }
   1006             result = i;
   1007         }
   1008     }
  1010     if( result == -1 ) {
  1011         int utlbEntry = mmu_utlb_lookup_vpn( vpn );
  1012         if( utlbEntry < 0 ) {
  1013             return utlbEntry;
  1014         } else {
   1015             return mmu_itlb_update_from_utlb( utlbEntry );
   1016         }
   1017     }
  1019     switch( result ) {
  1020     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
  1021     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
  1022     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
  1023     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   1024     }
   1026     return result;
   1027 }
  1029 /**
  1030  * Update the icache for an untranslated address
  1031  */
   1032 static inline void mmu_update_icache_phys( sh4addr_t addr )
   1033 {
  1034     if( (addr & 0x1C000000) == 0x0C000000 ) {
  1035         /* Main ram */
  1036         sh4_icache.page_vma = addr & 0xFF000000;
  1037         sh4_icache.page_ppa = 0x0C000000;
  1038         sh4_icache.mask = 0xFF000000;
  1039         sh4_icache.page = dc_main_ram;
  1040     } else if( (addr & 0x1FE00000) == 0 ) {
  1041         /* BIOS ROM */
  1042         sh4_icache.page_vma = addr & 0xFFE00000;
  1043         sh4_icache.page_ppa = 0;
  1044         sh4_icache.mask = 0xFFE00000;
  1045         sh4_icache.page = dc_boot_rom;
  1046     } else {
  1047         /* not supported */
   1048         sh4_icache.page_vma = -1;
   1049     }
   1050 }
  1052 /**
  1053  * Update the sh4_icache structure to describe the page(s) containing the
  1054  * given vma. If the address does not reference a RAM/ROM region, the icache
  1055  * will be invalidated instead.
  1056  * If AT is on, this method will raise TLB exceptions normally
  1057  * (hence this method should only be used immediately prior to execution of
  1058  * code), and otherwise will set the icache according to the matching TLB entry.
  1059  * If AT is off, this method will set the entire referenced RAM/ROM region in
  1060  * the icache.
  1061  * @return TRUE if the update completed (successfully or otherwise), FALSE
  1062  * if an exception was raised.
  1063  */
   1064 gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
   1065 {
  1066     int entryNo;
  1067     if( IS_SH4_PRIVMODE()  ) {
  1068         if( addr & 0x80000000 ) {
  1069             if( addr < 0xC0000000 ) {
  1070                 /* P1, P2 and P4 regions are pass-through (no translation) */
  1071                 mmu_update_icache_phys(addr);
  1072                 return TRUE;
  1073             } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
  1074                 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1075                 return FALSE;
   1076             }
   1077         }
  1079         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1080         if( (mmucr & MMUCR_AT) == 0 ) {
  1081             mmu_update_icache_phys(addr);
   1082             return TRUE;
   1083         }
  1085         if( (mmucr & MMUCR_SV) == 0 )
  1086         	entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1087         else
  1088         	entryNo = mmu_itlb_lookup_vpn( addr );
  1089     } else {
  1090         if( addr & 0x80000000 ) {
  1091             RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1092             return FALSE;
   1093         }
  1095         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1096         if( (mmucr & MMUCR_AT) == 0 ) {
  1097             mmu_update_icache_phys(addr);
   1098             return TRUE;
   1099         }
  1101         entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1103         if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
  1104             RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1105             return FALSE;
   1106         }
   1107     }
  1109     switch(entryNo) {
  1110     case -1:
  1111     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1112     return FALSE;
  1113     case -2:
  1114     RAISE_TLB_MULTIHIT_ERROR(addr);
  1115     return FALSE;
  1116     default:
  1117         sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
  1118         sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
  1119         if( sh4_icache.page == NULL ) {
  1120             sh4_icache.page_vma = -1;
  1121         } else {
  1122             sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
   1123             sh4_icache.mask = mmu_itlb[entryNo].mask;
   1124         }
   1125         return TRUE;
   1126     }
   1127 }
  1129 /**
  1130  * Translate address for disassembly purposes (ie performs an instruction
  1131  * lookup) - does not raise exceptions or modify any state, and ignores
  1132  * protection bits. Returns the translated address, or MMU_VMA_ERROR
  1133  * on translation failure.
  1134  */
   1135 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
   1136 {
  1137     if( vma & 0x80000000 ) {
  1138         if( vma < 0xC0000000 ) {
  1139             /* P1, P2 and P4 regions are pass-through (no translation) */
  1140             return VMA_TO_EXT_ADDR(vma);
  1141         } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
  1142             /* Not translatable */
   1143             return MMU_VMA_ERROR;
   1144         }
   1145     }
  1147     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1148     if( (mmucr & MMUCR_AT) == 0 ) {
   1149         return VMA_TO_EXT_ADDR(vma);
   1150     }
  1152     int entryNo = mmu_itlb_lookup_vpn( vma );
  1153     if( entryNo == -2 ) {
   1154         entryNo = mmu_itlb_lookup_vpn_asid( vma );
   1155     }
  1156     if( entryNo < 0 ) {
  1157         return MMU_VMA_ERROR;
  1158     } else {
  1159         return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
   1160         (vma & (~mmu_itlb[entryNo].mask));
   1161     }
   1162 }
  1164 /********************** TLB Direct-Access Regions ***************************/
  1165 #ifdef HAVE_FRAME_ADDRESS
  1166 #define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
  1167 #else
  1168 #define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
  1169 #endif
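        /* The ITLB address/data arrays select one of the four entries from address
         * bits [8:7]. */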
  1172 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
   1174 int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
   1175 {
  1176     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1177     return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
   1178 }
   1180 void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
   1181 {
  1182     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1183     ent->vpn = val & 0xFFFFFC00;
  1184     ent->asid = val & 0x000000FF;
   1185     ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
   1186 }
   1188 int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
   1189 {
  1190     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1191     return (ent->ppn & 0x1FFFFC00) | ent->flags;
   1192 }
   1194 void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
   1195 {
  1196     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1197     ent->ppn = val & 0x1FFFFC00;
   1198     ent->flags = val & 0x000001DA;
  1199     ent->mask = get_tlb_size_mask(val);
  1200     if( ent->ppn >= 0x1C000000 )
   1201         ent->ppn |= 0xE0000000;
   1202 }
  1204 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
  1205 #define UTLB_ASSOC(addr) (addr&0x80)
  1206 #define UTLB_DATA2(addr) (addr&0x00800000)
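        /* UTLB array accesses encode the entry number in address bits [13:8], the
         * associative-write flag in bit 7, and select data array 2 via bit 23. */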
   1208 int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
   1209 {
  1210     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1211     return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
   1212     ((ent->flags & TLB_DIRTY)<<7);
   1213 }
   1214 int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
   1215 {
  1216     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1217     if( UTLB_DATA2(addr) ) {
  1218         return ent->pcmcia;
  1219     } else {
   1220         return (ent->ppn&0x1FFFFC00) | ent->flags;
   1221     }
   1222 }
  1224 /**
  1225  * Find a UTLB entry for the associative TLB write - same as the normal
  1226  * lookup but ignores the valid bit.
  1227  */
   1228 static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   1229 {
  1230     int result = -1;
  1231     unsigned int i;
  1232     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
  1233         if( (mmu_utlb[i].flags & TLB_VALID) &&
  1234                 ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
  1235                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
  1236             if( result != -1 ) {
  1237                 fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
   1238                 return -2;
   1239             }
   1240             result = i;
   1241         }
   1242     }
   1243     return result;
   1244 }
  1246 /**
  1247  * Find a ITLB entry for the associative TLB write - same as the normal
  1248  * lookup but ignores the valid bit.
  1249  */
   1250 static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   1251 {
  1252     int result = -1;
  1253     unsigned int i;
  1254     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1255         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1256                 ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
  1257                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1258             if( result != -1 ) {
   1259                 return -2;
   1260             }
   1261             result = i;
   1262         }
   1263     }
   1264     return result;
   1265 }
   1267 void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
   1268 {
  1269     if( UTLB_ASSOC(addr) ) {
  1270         int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
  1271         if( utlb >= 0 ) {
  1272             struct utlb_entry *ent = &mmu_utlb[utlb];
  1273             uint32_t old_flags = ent->flags;
  1274             ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
  1275             ent->flags |= (val & TLB_VALID);
  1276             ent->flags |= ((val & 0x200)>>7);
  1277             if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
  1278                 if( old_flags & TLB_VALID )
  1279                     mmu_utlb_remove_entry( utlb );
  1280                 if( ent->flags & TLB_VALID )
   1281                     mmu_utlb_insert_entry( utlb );
   1282             }
   1283         }
  1285         int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
  1286         if( itlb >= 0 ) {
  1287             struct itlb_entry *ent = &mmu_itlb[itlb];
   1288             ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
   1289         }
  1291         if( itlb == -2 || utlb == -2 ) {
  1292             RAISE_TLB_MULTIHIT_ERROR(addr);
  1293             EXCEPTION_EXIT();
   1294             return;
   1295         }
  1296     } else {
  1297         struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1298         if( ent->flags & TLB_VALID ) 
  1299             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1300         ent->vpn = (val & 0xFFFFFC00);
  1301         ent->asid = (val & 0xFF);
  1302         ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
  1303         ent->flags |= (val & TLB_VALID);
  1304         ent->flags |= ((val & 0x200)>>7);
  1305         if( ent->flags & TLB_VALID ) 
   1306             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   1307     }
   1308 }
   1310 void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
   1311 {
  1312     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1313     if( UTLB_DATA2(addr) ) {
  1314         ent->pcmcia = val & 0x0000000F;
  1315     } else {
  1316         if( ent->flags & TLB_VALID ) 
  1317             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1318         ent->ppn = (val & 0x1FFFFC00);
  1319         ent->flags = (val & 0x000001FF);
  1320         ent->mask = get_tlb_size_mask(val);
  1321         if( ent->flags & TLB_VALID ) 
   1322             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   1323     }
   1324 }
  1326 struct mem_region_fn p4_region_itlb_addr = {
  1327         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1328         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1329         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1330         unmapped_read_burst, unmapped_write_burst,
  1331         unmapped_prefetch };
  1332 struct mem_region_fn p4_region_itlb_data = {
  1333         mmu_itlb_data_read, mmu_itlb_data_write,
  1334         mmu_itlb_data_read, mmu_itlb_data_write,
  1335         mmu_itlb_data_read, mmu_itlb_data_write,
  1336         unmapped_read_burst, unmapped_write_burst,
  1337         unmapped_prefetch };
  1338 struct mem_region_fn p4_region_utlb_addr = {
  1339         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1340         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1341         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1342         unmapped_read_burst, unmapped_write_burst,
  1343         unmapped_prefetch };
  1344 struct mem_region_fn p4_region_utlb_data = {
  1345         mmu_utlb_data_read, mmu_utlb_data_write,
  1346         mmu_utlb_data_read, mmu_utlb_data_write,
  1347         mmu_utlb_data_read, mmu_utlb_data_write,
  1348         unmapped_read_burst, unmapped_write_burst,
  1349         unmapped_prefetch };
  1351 /********************** Error regions **************************/
   1353 static void FASTCALL address_error_read( sh4addr_t addr, void *exc ) 
   1354 {
   1355     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1356     EXCEPTION_EXIT();
   1357 }
   1359 static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc ) 
   1360 {
   1361     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1362     EXCEPTION_EXIT();
   1363 }
   1365 static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
   1366 {
   1367     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
   1368     EXCEPTION_EXIT();
   1369 }
   1371 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
   1372 {
   1373     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
   1374     EXCEPTION_EXIT();
   1375 }
   1377 static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1378 {
   1379     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
   1380     EXCEPTION_EXIT();
   1381 }
   1383 static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
   1384 {
   1385     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
   1386     EXCEPTION_EXIT();
   1387 }
   1389 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
   1390 {
   1391     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1392     EXCEPTION_EXIT();
   1393 }
   1395 static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1396 {
   1397     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1398     EXCEPTION_EXIT();
   1399 }
   1401 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
   1402 {
   1403     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
   1404     EXCEPTION_EXIT();
   1405 }
   1407 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
   1408 {
   1409     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
   1410     EXCEPTION_EXIT();
   1411 }
   1413 static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
   1414 {
   1415     sh4_raise_tlb_multihit(addr);
   1416     EXCEPTION_EXIT();
   1417 }
   1419 static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1420 {
   1421     sh4_raise_tlb_multihit(addr);
   1422     EXCEPTION_EXIT();
   1423 }
   1425 static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
   1426 {
   1427     sh4_raise_tlb_multihit(addr);
   1428     EXCEPTION_EXIT();
   1429 }
  1430 /**
  1431  * Note: Per sec 4.6.4 of the SH7750 manual, SQ 
  1432  */
  1433 struct mem_region_fn mem_region_address_error = {
  1434         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1435         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1436         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1437         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1438         unmapped_prefetch };
  1440 struct mem_region_fn mem_region_tlb_miss = {
  1441         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1442         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1443         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1444         (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
  1445         unmapped_prefetch };
  1447 struct mem_region_fn mem_region_tlb_protected = {
  1448         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1449         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1450         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1451         (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
  1452         unmapped_prefetch };
  1454 struct mem_region_fn mem_region_tlb_multihit = {
  1455         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1456         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1457         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1458         (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
  1459         (mem_prefetch_fn_t)tlb_multi_hit_read };
  1462 /* Store-queue regions */
  1463 /* These are a bit of a pain - the first 8 fields are controlled by SQMD, while 
  1464  * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
  1465  * some cases), in contrast to the ordinary fields above.
  1467  * There is probably a simpler way to do this.
  1468  */
  1470 struct mem_region_fn p4_region_storequeue = { 
  1471         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1472         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1473         unmapped_read_long, unmapped_write_long,
  1474         unmapped_read_burst, unmapped_write_burst,
  1475         ccn_storequeue_prefetch }; 
  1477 struct mem_region_fn p4_region_storequeue_miss = { 
  1478         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1479         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1480         unmapped_read_long, unmapped_write_long,
  1481         unmapped_read_burst, unmapped_write_burst,
  1482         (mem_prefetch_fn_t)tlb_miss_read }; 
  1484 struct mem_region_fn p4_region_storequeue_multihit = { 
  1485         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1486         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1487         unmapped_read_long, unmapped_write_long,
  1488         unmapped_read_burst, unmapped_write_burst,
  1489         (mem_prefetch_fn_t)tlb_multi_hit_read }; 
  1491 struct mem_region_fn p4_region_storequeue_protected = {
  1492         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1493         unmapped_read_long, unmapped_write_long,
  1494         unmapped_read_long, unmapped_write_long,
  1495         unmapped_read_burst, unmapped_write_burst,
  1496         (mem_prefetch_fn_t)tlb_protected_read };
  1498 struct mem_region_fn p4_region_storequeue_sqmd = {
  1499         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1500         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1501         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1502         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1503         (mem_prefetch_fn_t)address_error_read };        
  1505 struct mem_region_fn p4_region_storequeue_sqmd_miss = { 
  1506         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1507         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1508         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1509         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1510         (mem_prefetch_fn_t)tlb_miss_read }; 
  1512 struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
  1513         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1514         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1515         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1516         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1517         (mem_prefetch_fn_t)tlb_multi_hit_read };        
  1519 struct mem_region_fn p4_region_storequeue_sqmd_protected = {
  1520         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1521         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1522         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1523         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1524         (mem_prefetch_fn_t)tlb_protected_read };