lxdream.org :: lxdream/src/sh4/mmu.c
filename src/sh4/mmu.c
changeset 943:9a277733eafa
prev 939:6f2302afeb89
next 946:d41ee7994db7
author nkeynes
date Mon Jan 05 04:16:28 2009 +0000 (13 years ago)
branch lxdream-mem
permissions -rw-r--r--
last change Handle remap-after-multihit-removal TLB updates
     1 /**
     2  * $Id$
     3  *
     4  * SH4 MMU implementation based on address space page maps. This module
     5  * is responsible for all address decoding functions. 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    19 #define MODULE sh4_module
    21 #include <stdio.h>
    22 #include <assert.h>
    23 #include "sh4/sh4mmio.h"
    24 #include "sh4/sh4core.h"
    25 #include "sh4/sh4trans.h"
    26 #include "dreamcast.h"
    27 #include "mem.h"
    28 #include "mmu.h"
    30 #define RAISE_TLB_ERROR(code, vpn) \
    31     MMIO_WRITE(MMU, TEA, vpn); \
    32     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    33     sh4_raise_tlb_exception(code);
    34 #define RAISE_MEM_ERROR(code, vpn) \
    35     MMIO_WRITE(MMU, TEA, vpn); \
    36     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    37     sh4_raise_exception(code);
    38 #define RAISE_TLB_MULTIHIT_ERROR(vpn) \
    39     sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    40     MMIO_WRITE(MMU, TEA, vpn); \
    41     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
    43 /* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
    44 #define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
    46 /* Primary address space (used directly by SH4 cores) */
    47 mem_region_fn_t *sh4_address_space;
    48 mem_region_fn_t *sh4_user_address_space;
    50 /* MMU-mapped storequeue targets. Only used with TLB on */
    51 mem_region_fn_t *storequeue_address_space; 
    52 mem_region_fn_t *storequeue_user_address_space; 
    54 /* Accessed from the UTLB accessor methods */
    55 uint32_t mmu_urc;
    56 uint32_t mmu_urb;
    58 /* Module globals */
    59 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
    60 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
    61 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
    62 static uint32_t mmu_lrui;
    63 static uint32_t mmu_asid; // current asid
    65 /* Structures for 1K page handling */
    66 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
    67 static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
    68 static int mmu_utlb_1k_free_index;
    71 /* Function prototypes */
    72 static void mmu_invalidate_tlb();
    73 static void mmu_utlb_register_all();
    74 static void mmu_utlb_remove_entry(int);
    75 static void mmu_utlb_insert_entry(int);
    76 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    77 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    78 static void mmu_set_tlb_enabled( int tlb_on );
    79 static void mmu_set_tlb_asid( uint32_t asid );
    80 static void mmu_set_storequeue_protected( int protected );
    81 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
    82 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
    83 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
    84 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
    85 static void mmu_utlb_1k_init();
    86 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
    87 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
    89 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
    90 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
    91 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
    92 static uint32_t get_tlb_size_mask( uint32_t flags );
    93 static uint32_t get_tlb_size_pages( uint32_t flags );
    96 /*********************** Module public functions ****************************/
    98 /**
    99  * Allocate memory for the address space maps, and initialize them according
   100  * to the default (reset) values. (TLB is disabled by default)
   101  */
   103 void MMU_init()
   104 {
   105     sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   106     sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   107     storequeue_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
   108     storequeue_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
   110     mmu_set_tlb_enabled(0);
   111     mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
   112     mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue);
   114     /* Setup P4 tlb/cache access regions */
   115     mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   116     mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
   117     mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
   118     mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
   119     mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
   120     mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
   121     mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
   122     mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
   123     mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
   124     mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
   125     mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
   127     /* Setup P4 control region */
   128     mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
   129     mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
   130     mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
   131     mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
   132     mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
   133     mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
   134     mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
   135     mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
   136     mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
   137     mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
   138     mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
   139     mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
   140     mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
   142     register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
   143     mmu_utlb_1k_init();
   145     /* Ensure the code regions are executable */
   146     mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
   147     mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
   148 }
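/* A minimal sketch of how the page-map tables built above are consumed, assuming
 * the read_long field name from the mem_region_fn declaration in mem.h: each
 * access uses the top 20 bits of the virtual address as an index and dispatches
 * through the selected region's handlers, which is what keeps per-page TLB
 * behaviour cheap. */
#if 0
static int32_t example_dispatch_read_long( sh4vma_t vma )
{
    mem_region_fn_t region = sh4_address_space[vma >> 12];  /* 4K page granularity */
    return region->read_long( vma );  /* may raise TLB miss/protection exceptions */
}
#endif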
   150 void MMU_reset()
   151 {
   152     mmio_region_MMU_write( CCR, 0 );
   153     mmio_region_MMU_write( MMUCR, 0 );
   154 }
   156 void MMU_save_state( FILE *f )
   157 {
   158     fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
   159     fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
   160     fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
   161     fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
   162     fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
   163     fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
   164 }
   166 int MMU_load_state( FILE *f )
   167 {
   168     if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
   169         return 1;
   170     }
   171     if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
   172         return 1;
   173     }
   174     if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
   175         return 1;
   176     }
   177     if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
   178         return 1;
   179     }
   180     if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
   181         return 1;
   182     }
   183     if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
   184         return 1;
   185     }
   187     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
   188     mmu_set_tlb_enabled(mmucr&MMUCR_AT);
   189     mmu_set_storequeue_protected(mmucr&MMUCR_SQMD);
   190     return 0;
   191 }
   193 /**
   194  * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
   195  * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
   196  */
   197 void MMU_ldtlb()
   198 {
   199     mmu_urc %= mmu_urb;
   200     if( mmu_utlb[mmu_urc].flags & TLB_VALID )
   201         mmu_utlb_remove_entry( mmu_urc );
   202     mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
   203     mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
   204     mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
   205     mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
   206     mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
   207     mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
   208     if( mmu_utlb[mmu_urc].flags & TLB_VALID )
   209         mmu_utlb_insert_entry( mmu_urc );
   210 }
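/* Worked example of the field split performed above, for hypothetical register
 * values PTEH = 0x12345C78 and PTEL = 0x0C00415F:
 *   vpn   = PTEH & 0xFFFFFC00 = 0x12345C00
 *   asid  = PTEH & 0x000000FF = 0x78
 *   ppn   = PTEL & 0x1FFFFC00 = 0x0C004000
 *   flags = PTEL & 0x000001FF = 0x15F   (the V/SZ/PR/C/D/SH/WT bits)
 * The entry is unmapped before and remapped after the copy so the address-space
 * tables stay consistent with the raw UTLB contents. */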
   213 MMIO_REGION_READ_FN( MMU, reg )
   214 {
   215     reg &= 0xFFF;
   216     switch( reg ) {
   217     case MMUCR:
   218         mmu_urc %= mmu_urb;
   219         return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
   220     default:
   221         return MMIO_READ( MMU, reg );
   222     }
   223 }
   225 MMIO_REGION_WRITE_FN( MMU, reg, val )
   226 {
   227     uint32_t tmp;
   228     reg &= 0xFFF;
   229     switch(reg) {
   230     case SH4VER:
   231         return;
   232     case PTEH:
   233         val &= 0xFFFFFCFF;
   234         if( (val & 0xFF) != mmu_asid ) {
   235             mmu_set_tlb_asid( val&0xFF );
   236             sh4_icache.page_vma = -1; // invalidate icache as asid has changed
   237         }
   238         break;
   239     case PTEL:
   240         val &= 0x1FFFFDFF;
   241         break;
   242     case PTEA:
   243         val &= 0x0000000F;
   244         break;
   245     case TRA:
   246         val &= 0x000003FC;
   247         break;
   248     case EXPEVT:
   249     case INTEVT:
   250         val &= 0x00000FFF;
   251         break;
   252     case MMUCR:
   253         if( val & MMUCR_TI ) {
   254             mmu_invalidate_tlb();
   255         }
   256         mmu_urc = (val >> 10) & 0x3F;
   257         mmu_urb = (val >> 18) & 0x3F;
   258         if( mmu_urb == 0 ) {
   259             mmu_urb = 0x40;
   260         }
   261         mmu_lrui = (val >> 26) & 0x3F;
   262         val &= 0x00000301;
   263         tmp = MMIO_READ( MMU, MMUCR );
   264         if( (val ^ tmp) & (MMUCR_SQMD) ) {
   265             mmu_set_storequeue_protected( val & MMUCR_SQMD );
   266         }
   267         if( (val ^ tmp) & (MMUCR_AT) ) {
   268             // AT flag has changed state - flush the xlt cache as all bets
   269             // are off now. We also need to force an immediate exit from the
   270             // current block
   271             mmu_set_tlb_enabled( val & MMUCR_AT );
   272             MMIO_WRITE( MMU, MMUCR, val );
   273             sh4_flush_icache();
   274         }
   275         break;
   276     case CCR:
   277         CCN_set_cache_control( val );
   278         val &= 0x81A7;
   279         break;
   280     case MMUUNK1:
   281         /* Note that if the high bit is set, this appears to reset the machine.
   282          * Not emulating this behaviour yet until we know why...
   283          */
   284         val &= 0x00010007;
   285         break;
   286     case QACR0:
   287     case QACR1:
   288         val &= 0x0000001C;
   289         break;
   290     case PMCR1:
   291         PMM_write_control(0, val);
   292         val &= 0x0000C13F;
   293         break;
   294     case PMCR2:
   295         PMM_write_control(1, val);
   296         val &= 0x0000C13F;
   297         break;
   298     default:
   299         break;
   300     }
   301     MMIO_WRITE( MMU, reg, val );
   302 }
   304 /********************** 1K Page handling ***********************/
   305 /* Since we use 4K pages as our native page size, 1K pages need a bit of extra
   306  * effort to manage - we justify this on the basis that most programs won't
   307  * actually use 1K pages, so we may as well optimize for the common case.
   308  * 
   309  * Implementation uses an intermediate page entry (the utlb_1k_entry) that
   310  * redirects requests to the 'real' page entry. These are allocated on an
   311  * as-needed basis, and returned to the pool when all subpages are empty.
   312  */ 
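/* A sketch of the redirection path, assuming the read_long field name used by the
 * handlers that mmu_utlb_1k_init_vtable installs: the 1K entry's own handlers pick
 * one of its four subpages from address bits 10-11 and forward the access, so the
 * rest of the module can keep its 4K-granularity tables. */
#if 0
static int32_t example_1k_forward_read_long( struct utlb_1k_entry *ent, sh4addr_t addr )
{
    return ent->subpages[(addr >> 10) & 0x03]->read_long( addr );
}
#endif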
   313 static void mmu_utlb_1k_init()
   314 {
   315     int i;
   316     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   317         mmu_utlb_1k_free_list[i] = i;
   318         mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
   319     }
   320     mmu_utlb_1k_free_index = 0;
   321 }
   323 static struct utlb_1k_entry *mmu_utlb_1k_alloc()
   324 {
   325     assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
   326     struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_index++];
   327     return entry;
   328 }    
   330 static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
   331 {
   332     unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
   333     assert( entryNo < UTLB_ENTRY_COUNT );
   334     assert( mmu_utlb_1k_free_index > 0 );
   335     mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
   336 }
   339 /********************** Address space maintenance *************************/
   341 /**
   342  * MMU accessor functions just increment URC - fixup here if necessary
   343  */
   344 static inline void mmu_urc_fixup()
   345 {
   346    mmu_urc %= mmu_urb; 
   347 }
   349 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   350 {
   351     int count = (end - start) >> 12;
   352     mem_region_fn_t *ptr = &sh4_address_space[start>>12];
   353     while( count-- > 0 ) {
   354         *ptr++ = fn;
   355     }
   356 }
   357 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   358 {
   359     int count = (end - start) >> 12;
   360     mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
   361     while( count-- > 0 ) {
   362         *ptr++ = fn;
   363     }
   364 }
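/* Worked example: registering 0xE0000000..0xE4000000 fills
 * (0xE4000000 - 0xE0000000) >> 12 = 0x4000 (16384) consecutive 4K slots starting
 * at index 0xE0000000 >> 12 = 0xE0000. An end value of 0 relies on unsigned
 * wrap-around, so registering 0x80000000..0x00000000 covers
 * (0 - 0x80000000) >> 12 = 0x80000 slots, i.e. the whole upper 2GB. */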
   366 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
   367 {
   368     int i;
   369     if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
   370         /* TLB on */
   371         sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
   372         sh4_address_space[(page|0xA0000000)>>12] = fn;
   373         /* Scan UTLB and update any direct-referencing entries */
   374     } else {
   375         /* Direct map to U0, P0, P1, P2, P3 */
   376         for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
   377             sh4_address_space[(page|i)>>12] = fn;
   378         }
   379         for( i=0; i < 0x80000000; i+= 0x20000000 ) {
   380             sh4_user_address_space[(page|i)>>12] = fn;
   381         }
   382     }
   383 }
   385 static void mmu_set_tlb_enabled( int tlb_on )
   386 {
   387     mem_region_fn_t *ptr, *uptr;
   388     int i;
   390     if( tlb_on ) {
   391         mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   392         mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
   393         mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   394         for( i=0, ptr = storequeue_address_space, uptr = storequeue_user_address_space; 
   395              i<0x04000000; i+= LXDREAM_PAGE_SIZE ) {
   396             *ptr++ = &mem_region_tlb_miss;
   397             *uptr++ = &mem_region_tlb_miss;
   398         }
   399         mmu_utlb_register_all();
   400     } else {
   401         for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   402             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   403         }
   404         for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   405             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   406         }
   407     }
   408 }
   410 static void mmu_set_storequeue_protected( int protected ) 
   411 {
   412     if( protected ) {
   413         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &mem_region_address_error );
   414     } else {
   415         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   416     }
   417 }
   419 static void mmu_set_tlb_asid( uint32_t asid )
   420 {
   421     /* Scan for pages that need to be remapped */
   422     int i;
   423     if( IS_SV_ENABLED() ) {
   424         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   425             if( mmu_utlb[i].flags & TLB_VALID ) {
   426                 if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
   427                     if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
   428                         if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   429                                 get_tlb_size_pages(mmu_utlb[i].flags) ) )
   430                             mmu_utlb_remap_pages( FALSE, TRUE, i );
   431                     } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
   432                         mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn, 
   433                                 mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   434                                 get_tlb_size_pages(mmu_utlb[i].flags) );  
   435                     }
   436                 }
   437             }
   438         }
   439     } else {
   440         // Remap both Priv+user pages
   441         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   442             if( mmu_utlb[i].flags & TLB_VALID ) {
   443                 if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
   444                     if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
   445                         if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   446                                 get_tlb_size_pages(mmu_utlb[i].flags) ) )
   447                             mmu_utlb_remap_pages( TRUE, TRUE, i );
   448                     } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
   449                         mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn, 
   450                                 mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   451                                 get_tlb_size_pages(mmu_utlb[i].flags) );  
   452                     }
   453                 }
   454             }
   455         }
   456     }
   458     mmu_asid = asid;
   459 }
   461 static uint32_t get_tlb_size_mask( uint32_t flags )
   462 {
   463     switch( flags & TLB_SIZE_MASK ) {
   464     case TLB_SIZE_1K: return MASK_1K;
   465     case TLB_SIZE_4K: return MASK_4K;
   466     case TLB_SIZE_64K: return MASK_64K;
   467     case TLB_SIZE_1M: return MASK_1M;
   468     default: return 0; /* Unreachable */
   469     }
   470 }
   471 static uint32_t get_tlb_size_pages( uint32_t flags )
   472 {
   473     switch( flags & TLB_SIZE_MASK ) {
   474     case TLB_SIZE_1K: return 0;
   475     case TLB_SIZE_4K: return 1;
   476     case TLB_SIZE_64K: return 16;
   477     case TLB_SIZE_1M: return 256;
   478     default: return 0; /* Unreachable */
   479     }
   480 }
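/* Consistency sketch for the two helpers above: a 1M entry keeps only the top 12
 * address bits (MASK_1M is expected to be 0xFFF00000 in mmu.h) and spans
 * 0x100000 >> 12 = 256 native 4K slots, while a 1K entry reports 0 pages so that
 * mmu_utlb_map_pages takes the subpage path instead. */
#if 0
static void example_tlb_size_consistency( void )
{
    assert( get_tlb_size_pages( TLB_SIZE_64K ) == 16 );   /* 0x10000 >> 12 */
    assert( get_tlb_size_pages( TLB_SIZE_1M ) == 256 );   /* 0x100000 >> 12 */
}
#endif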
   482 /**
   483  * Add a new TLB entry mapping to the address space table. If any of the pages
   484  * are already mapped, they are mapped to the TLB multi-hit page instead.
   485  * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
   486  */ 
   487 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
   488 {
   489     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   490     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   491     gboolean mapping_ok = TRUE;
   492     int i;
   494     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   495         /* Storequeue mapping */
   496         ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
   497         uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
   498     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   499         user_page = NULL; /* No user access to P3 region */
   500     } else if( start_addr >= 0x80000000 ) {
   501         return TRUE; // No mapping - legal but meaningless
   502     }
   504     if( npages == 0 ) {
   505         struct utlb_1k_entry *ent;
   506         int i, idx = (start_addr >> 10) & 0x03;
   507         if( IS_1K_PAGE_ENTRY(*ptr) ) {
   508             ent = (struct utlb_1k_entry *)*ptr;
   509         } else {
   510             ent = mmu_utlb_1k_alloc();
   511             /* New 1K struct - init to previous contents of region */
   512             for( i=0; i<4; i++ ) {
   513                 ent->subpages[i] = *ptr;
   514                 ent->user_subpages[i] = *uptr;
   515             }
   516             *ptr = &ent->fn;
   517             *uptr = &ent->user_fn;
   518         }
   520         if( priv_page != NULL ) {
   521             if( ent->subpages[idx] == &mem_region_tlb_miss ) {
   522                 ent->subpages[idx] = priv_page;
   523             } else {
   524                 mapping_ok = FALSE;
   525                 ent->subpages[idx] = &mem_region_tlb_multihit;
   526             }
   527         }
   528         if( user_page != NULL ) {
   529             if( ent->user_subpages[idx] == &mem_region_tlb_miss ) {
   530                 ent->user_subpages[idx] = user_page;
   531             } else {
   532                 mapping_ok = FALSE;
   533                 ent->user_subpages[idx] = &mem_region_tlb_multihit;
   534             }
   535         }
   537     } else {
   538         if( priv_page != NULL ) {
   539             if( user_page != NULL ) {
   540                 for( i=0; i<npages; i++ ) {
   541                     if( *ptr == &mem_region_tlb_miss ) {
   542                         *ptr++ = priv_page;
   543                         *uptr++ = user_page;
   544                     } else {
   545                         mapping_ok = FALSE;
   546                         *ptr++ = &mem_region_tlb_multihit;
   547                         *uptr++ = &mem_region_tlb_multihit;
   548                     }
   549                 }
   550             } else {
   551                 /* Privileged mapping only */
   552                 for( i=0; i<npages; i++ ) {
   553                     if( *ptr == &mem_region_tlb_miss ) {
   554                         *ptr++ = priv_page;
   555                     } else {
   556                         mapping_ok = FALSE;
   557                         *ptr++ = &mem_region_tlb_multihit;
   558                     }
   559                 }
   560             }
   561         } else if( user_page != NULL ) {
   562             /* User mapping only (eg ASID change remap w/ SV=1) */
   563             for( i=0; i<npages; i++ ) {
   564                 if( *uptr == &mem_region_tlb_miss ) {
   565                     *uptr++ = user_page;
   566                 } else {
   567                     mapping_ok = FALSE;
   568                     *uptr++ = &mem_region_tlb_multihit;
   569                 }
   570             }        
   571         }
   572     }
   573     return mapping_ok;
   574 }
   576 /**
   577  * Remap any pages within the region covered by entryNo, but not including 
   578  * entryNo itself. This is used to reestablish pages that were previously
   579  * covered by a multi-hit exception region when one of the pages is removed.
   580  */
   581 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
   582 {
   583     int mask = mmu_utlb[entryNo].mask;
   584     uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
   585     int i;
   587     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   588         if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
   589             /* Overlapping region */
   590             mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
   591             mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
   592             uint32_t start_addr;
   593             int npages;
   595             if( mmu_utlb[i].mask >= mask ) {
   596                 /* entry is no larger than the area we're replacing - map completely */
   597                 start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
   598                 npages = get_tlb_size_pages( mmu_utlb[i].flags );
   599             } else {
   600                 /* Otherwise map subset - region covered by removed page */
   601                 start_addr = remap_addr;
   602                 npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
   603             }
   605             if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) { 
   606                 mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
   607             } else if( IS_SV_ENABLED() ) {
   608                 mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
   609             }
   611         }
   612     }
   613 }
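/* Scenario handled here (the "remap after multihit removal" case): when two valid
 * UTLB entries A and B overlap, mmu_utlb_map_pages leaves their shared slots
 * pointing at mem_region_tlb_multihit. If A is later removed, the unmap is
 * reported as not clean and mmu_utlb_remove_entry calls this function, which
 * re-installs B's mapping over the slots A used to cover, restoring normal
 * single-hit dispatch. */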
   615 /**
   616  * Remove a previous TLB mapping (replacing them with the TLB miss region).
   617  * @return FALSE if any pages were previously mapped to the TLB multihit page, 
   618  * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
   619  */
   620 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
   621 {
   622     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   623     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   624     gboolean unmapping_ok = TRUE;
   625     int i;
   627     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   628         /* Storequeue mapping */
   629         ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
   630         uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
   631     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   632         unmap_user = FALSE;
   633     } else if( start_addr >= 0x80000000 ) {
   634         return TRUE; // No mapping - legal but meaningless
   635     }
   637     if( npages == 0 ) { // 1K page
   638         assert( IS_1K_PAGE_ENTRY( *ptr ) );
   639         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
   640         int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
   641         if( ent->subpages[idx] == &mem_region_tlb_multihit ) {
   642             unmapping_ok = FALSE;
   643         }
   644         if( unmap_priv )
   645             ent->subpages[idx] = &mem_region_tlb_miss;
   646         if( unmap_user )
   647             ent->user_subpages[idx] = &mem_region_tlb_miss;
   649         /* If all 4 subpages have the same content, merge them together and
   650          * release the 1K entry
   651          */
   652         mem_region_fn_t priv_page = ent->subpages[0];
   653         mem_region_fn_t user_page = ent->user_subpages[0];
   654         for( i=1; i<4; i++ ) {
   655             if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
   656                 mergeable = 0;
   657                 break;
   658             }
   659         }
   660         if( mergeable ) {
   661             mmu_utlb_1k_free(ent);
   662             *ptr = priv_page;
   663             *uptr = user_page;
   664         }
   665     } else {
   666         if( unmap_priv ) {
   667             if( unmap_user ) {
   668                 for( i=0; i<npages; i++ ) {
   669                     if( *ptr == &mem_region_tlb_multihit ) {
   670                         unmapping_ok = FALSE;
   671                     }
   672                     *ptr++ = &mem_region_tlb_miss;
   673                     *uptr++ = &mem_region_tlb_miss;
   674                 }
   675             } else {
   676                 /* Privileged (un)mapping only */
   677                 for( i=0; i<npages; i++ ) {
   678                     if( *ptr == &mem_region_tlb_multihit ) {
   679                         unmapping_ok = FALSE;
   680                     }
   681                     *ptr++ = &mem_region_tlb_miss;
   682                 }
   683             }
   684         } else if( unmap_user ) {
   685             /* User (un)mapping only */
   686             for( i=0; i<npages; i++ ) {
   687                 if( *uptr == &mem_region_tlb_multihit ) {
   688                     unmapping_ok = FALSE;
   689                 }
   690                 *uptr++ = &mem_region_tlb_miss;
   691             }            
   692         }
   693     }
   695     return unmapping_ok;
   696 }
   698 static void mmu_utlb_insert_entry( int entry )
   699 {
   700     struct utlb_entry *ent = &mmu_utlb[entry];
   701     mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
   702     mem_region_fn_t upage;
   703     sh4addr_t start_addr = ent->vpn & ent->mask;
   704     int npages = get_tlb_size_pages(ent->flags);
   706     if( (ent->flags & TLB_USERMODE) == 0 ) {
   707         upage = &mem_region_user_protected;
   708     } else {        
   709         upage = page;
   710     }
   711     mmu_utlb_pages[entry].user_fn = upage;
   713     if( (ent->flags & TLB_WRITABLE) == 0 ) {
   714         page->write_long = (mem_write_fn_t)tlb_protected_write;
   715         page->write_word = (mem_write_fn_t)tlb_protected_write;
   716         page->write_byte = (mem_write_fn_t)tlb_protected_write;
   717         page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
   718         mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   719     } else if( (ent->flags & TLB_DIRTY) == 0 ) {
   720         page->write_long = (mem_write_fn_t)tlb_initial_write;
   721         page->write_word = (mem_write_fn_t)tlb_initial_write;
   722         page->write_byte = (mem_write_fn_t)tlb_initial_write;
   723         page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
   724         mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   725     } else {
   726         mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
   727     }
   729     /* Is page visible? */
   730     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) { 
   731         mmu_utlb_map_pages( page, upage, start_addr, npages );
   732     } else if( IS_SV_ENABLED() ) {
   733         mmu_utlb_map_pages( page, NULL, start_addr, npages );
   734     }
   735 }
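/* The protection scheme above avoids per-access flag checks: a page that is not
 * writable (or not yet dirty) gets its write handlers pointed at
 * tlb_protected_write / tlb_initial_write, so the exception is raised lazily from
 * the dispatch table itself. Sketch of the resulting store path, assuming the
 * write_long field name from the mem_region_fn declaration in mem.h: */
#if 0
static void example_store_long( sh4vma_t vma, uint32_t val )
{
    /* For a clean (TLB_DIRTY clear) page this lands in tlb_initial_write and
     * raises EXC_INIT_PAGE_WRITE; once the guest OS sets the dirty bit and
     * reloads the entry, the full vtable is installed and stores go straight
     * through to memory. */
    sh4_address_space[vma >> 12]->write_long( vma, val );
}
#endif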
   737 static void mmu_utlb_remove_entry( int entry )
   738 {
   739     int i, j;
   740     struct utlb_entry *ent = &mmu_utlb[entry];
   741     sh4addr_t start_addr = ent->vpn&ent->mask;
   742     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   743     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   744     gboolean unmap_user;
   745     int npages = get_tlb_size_pages(ent->flags);
   747     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
   748         unmap_user = TRUE;
   749     } else if( IS_SV_ENABLED() ) {
   750         unmap_user = FALSE;
   751     } else {
   752         return; // Not mapped
   753     }
   755     gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
   757     if( !clean_unmap ) {
   758         mmu_utlb_remap_pages( TRUE, unmap_user, entry );
   759     }
   760 }
   762 static void mmu_utlb_register_all()
   763 {
   764     int i;
   765     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   766         if( mmu_utlb[i].flags & TLB_VALID ) 
   767             mmu_utlb_insert_entry( i );
   768     }
   769 }
   771 static void mmu_invalidate_tlb()
   772 {
   773     int i;
   774     for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
   775         mmu_itlb[i].flags &= (~TLB_VALID);
   776     }
   777     if( IS_TLB_ENABLED() ) {
   778         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   779             if( mmu_utlb[i].flags & TLB_VALID ) {
   780                 mmu_utlb_remove_entry( i );
   781             }
   782         }
   783     }
   784     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   785         mmu_utlb[i].flags &= (~TLB_VALID);
   786     }
   787 }
   789 /******************************************************************************/
   790 /*                        MMU TLB address translation                         */
   791 /******************************************************************************/
   793 /**
   794  * Translate a 32-bit address into a UTLB entry number. Does not check for
   795  * page protection etc.
   796  * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
   797  */
   798 int mmu_utlb_entry_for_vpn( uint32_t vpn )
   799 {
   800     mem_region_fn_t fn = sh4_address_space[vpn>>12];
   801     if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   802         return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   803     } else if( fn == &mem_region_tlb_multihit ) {
   804         return -2;
   805     } else {
   806         return -1;
   807     }
   808 }
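/* The entry number is recovered by pointer arithmetic alone: each UTLB entry owns
 * exactly one struct in mmu_utlb_pages, so a page whose handler pointer falls
 * inside that array identifies its owning entry directly, with no separate
 * reverse map required. */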
   811 /**
   812  * Perform the actual utlb lookup w/ asid matching.
   813  * Possible outcomes are:
   814  *   0..63 Single match - good, return entry found
   815  *   -1 No match - raise a tlb data miss exception
   816  *   -2 Multiple matches - raise a multi-hit exception (reset)
   817  * @param vpn virtual address to resolve
   818  * @return the resultant UTLB entry, or an error.
   819  */
   820 static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
   821 {
   822     int result = -1;
   823     unsigned int i;
   825     mmu_urc++;
   826     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   827         mmu_urc = 0;
   828     }
   830     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   831         if( (mmu_utlb[i].flags & TLB_VALID) &&
   832                 ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
   833                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   834             if( result != -1 ) {
   835                 return -2;
   836             }
   837             result = i;
   838         }
   839     }
   840     return result;
   841 }
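/* Worked example of the match test ((entry.vpn ^ vpn) & entry.mask) == 0, using a
 * hypothetical 64K entry with vpn 0x7C120000 and mask 0xFFFF0000: probing
 * 0x7C12A3F4 gives 0x7C120000 ^ 0x7C12A3F4 = 0x0000A3F4, which masked with
 * 0xFFFF0000 is 0, so the entry matches; any address outside that 64K frame
 * leaves high bits set and fails the test. */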
   843 /**
   844  * Perform the actual utlb lookup matching on vpn only
   845  * Possible outcomes are:
   846  *   0..63 Single match - good, return entry found
   847  *   -1 No match - raise a tlb data miss exception
   848  *   -2 Multiple matches - raise a multi-hit exception (reset)
   849  * @param vpn virtual address to resolve
   850  * @return the resultant UTLB entry, or an error.
   851  */
   852 static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
   853 {
   854     int result = -1;
   855     unsigned int i;
   857     mmu_urc++;
   858     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   859         mmu_urc = 0;
   860     }
   862     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   863         if( (mmu_utlb[i].flags & TLB_VALID) &&
   864                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   865             if( result != -1 ) {
   866                 return -2;
   867             }
   868             result = i;
   869         }
   870     }
   872     return result;
   873 }
   875 /**
   876  * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
   877  * @return the number (0-3) of the replaced entry.
   878  */
   879 static int inline mmu_itlb_update_from_utlb( int entryNo )
   880 {
   881     int replace;
   882     /* Determine entry to replace based on lrui */
   883     if( (mmu_lrui & 0x38) == 0x38 ) {
   884         replace = 0;
   885         mmu_lrui = mmu_lrui & 0x07;
   886     } else if( (mmu_lrui & 0x26) == 0x06 ) {
   887         replace = 1;
   888         mmu_lrui = (mmu_lrui & 0x19) | 0x20;
   889     } else if( (mmu_lrui & 0x15) == 0x01 ) {
   890         replace = 2;
   891         mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
   892     } else { // Note - gets invalid entries too
   893         replace = 3;
   894         mmu_lrui = (mmu_lrui | 0x0B);
   895     }
   897     mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
   898     mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
   899     mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
   900     mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
   901     mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
   902     return replace;
   903 }
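/* The mmu_lrui tests above follow the SH7750 LRUI encoding: the six bits record
 * which entry of each ITLB pair was used more recently, so each mask/value test
 * identifies the entry that is older than the other three, and the update marks
 * the newly loaded entry as most recently used. */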
   905 /**
   906  * Perform the actual itlb lookup w/ asid protection
   907  * Possible outcomes are:
   908  *   0..3 Single match - good, return entry found
   909  *   -1 No match - raise a tlb data miss exception
   910  *   -2 Multiple matches - raise a multi-hit exception (reset)
   911  * @param vpn virtual address to resolve
   912  * @return the resultant ITLB entry, or an error.
   913  */
   914 static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
   915 {
   916     int result = -1;
   917     unsigned int i;
   919     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   920         if( (mmu_itlb[i].flags & TLB_VALID) &&
   921                 ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
   922                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   923             if( result != -1 ) {
   924                 return -2;
   925             }
   926             result = i;
   927         }
   928     }
   930     if( result == -1 ) {
   931         int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
   932         if( utlbEntry < 0 ) {
   933             return utlbEntry;
   934         } else {
   935             return mmu_itlb_update_from_utlb( utlbEntry );
   936         }
   937     }
   939     switch( result ) {
   940     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
   941     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
   942     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
   943     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   944     }
   946     return result;
   947 }
   949 /**
   950  * Perform the actual itlb lookup on vpn only
   951  * Possible outcomes are:
   952  *   0..3 Single match - good, return entry found
   953  *   -1 No match - raise a tlb data miss exception
   954  *   -2 Multiple matches - raise a multi-hit exception (reset)
   955  * @param vpn virtual address to resolve
   956  * @return the resultant ITLB entry, or an error.
   957  */
   958 static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
   959 {
   960     int result = -1;
   961     unsigned int i;
   963     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   964         if( (mmu_itlb[i].flags & TLB_VALID) &&
   965                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   966             if( result != -1 ) {
   967                 return -2;
   968             }
   969             result = i;
   970         }
   971     }
   973     if( result == -1 ) {
   974         int utlbEntry = mmu_utlb_lookup_vpn( vpn );
   975         if( utlbEntry < 0 ) {
   976             return utlbEntry;
   977         } else {
   978             return mmu_itlb_update_from_utlb( utlbEntry );
   979         }
   980     }
   982     switch( result ) {
   983     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
   984     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
   985     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
   986     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   987     }
   989     return result;
   990 }
   992 /**
   993  * Update the icache for an untranslated address
   994  */
   995 static inline void mmu_update_icache_phys( sh4addr_t addr )
   996 {
   997     if( (addr & 0x1C000000) == 0x0C000000 ) {
   998         /* Main ram */
   999         sh4_icache.page_vma = addr & 0xFF000000;
  1000         sh4_icache.page_ppa = 0x0C000000;
  1001         sh4_icache.mask = 0xFF000000;
  1002         sh4_icache.page = dc_main_ram;
  1003     } else if( (addr & 0x1FE00000) == 0 ) {
  1004         /* BIOS ROM */
  1005         sh4_icache.page_vma = addr & 0xFFE00000;
  1006         sh4_icache.page_ppa = 0;
  1007         sh4_icache.mask = 0xFFE00000;
  1008         sh4_icache.page = dc_boot_rom;
  1009     } else {
  1010         /* not supported */
  1011         sh4_icache.page_vma = -1;
  1012     }
  1013 }
  1015 /**
  1016  * Update the sh4_icache structure to describe the page(s) containing the
  1017  * given vma. If the address does not reference a RAM/ROM region, the icache
  1018  * will be invalidated instead.
  1019  * If AT is on, this method will raise TLB exceptions normally
  1020  * (hence this method should only be used immediately prior to execution of
  1021  * code), and otherwise will set the icache according to the matching TLB entry.
  1022  * If AT is off, this method will set the entire referenced RAM/ROM region in
  1023  * the icache.
  1024  * @return TRUE if the update completed (successfully or otherwise), FALSE
  1025  * if an exception was raised.
  1026  */
  1027 gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
  1028 {
  1029     int entryNo;
  1030     if( IS_SH4_PRIVMODE()  ) {
  1031         if( addr & 0x80000000 ) {
  1032             if( addr < 0xC0000000 ) {
  1033                 /* P1, P2 and P4 regions are pass-through (no translation) */
  1034                 mmu_update_icache_phys(addr);
  1035                 return TRUE;
  1036             } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
  1037                 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1038                 return FALSE;
  1039             }
  1040         }
  1042         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1043         if( (mmucr & MMUCR_AT) == 0 ) {
  1044             mmu_update_icache_phys(addr);
  1045             return TRUE;
  1046         }
  1048         if( (mmucr & MMUCR_SV) == 0 )
  1049         	entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1050         else
  1051         	entryNo = mmu_itlb_lookup_vpn( addr );
  1052     } else {
  1053         if( addr & 0x80000000 ) {
  1054             RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1055             return FALSE;
  1056         }
  1058         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1059         if( (mmucr & MMUCR_AT) == 0 ) {
  1060             mmu_update_icache_phys(addr);
  1061             return TRUE;
  1062         }
  1064         entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1066         if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
  1067             RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1068             return FALSE;
  1069         }
  1070     }
  1072     switch(entryNo) {
  1073     case -1:
  1074     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1075     return FALSE;
  1076     case -2:
  1077     RAISE_TLB_MULTIHIT_ERROR(addr);
  1078     return FALSE;
  1079     default:
  1080         sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
  1081         sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
  1082         if( sh4_icache.page == NULL ) {
  1083             sh4_icache.page_vma = -1;
  1084         } else {
  1085             sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
  1086             sh4_icache.mask = mmu_itlb[entryNo].mask;
  1087         }
  1088         return TRUE;
  1089     }
  1090 }
  1092 /**
  1093  * Translate address for disassembly purposes (ie performs an instruction
  1094  * lookup) - does not raise exceptions or modify any state, and ignores
  1095  * protection bits. Returns the translated address, or MMU_VMA_ERROR
  1096  * on translation failure.
  1097  */
  1098 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
  1099 {
  1100     if( vma & 0x80000000 ) {
  1101         if( vma < 0xC0000000 ) {
  1102             /* P1, P2 and P4 regions are pass-through (no translation) */
  1103             return VMA_TO_EXT_ADDR(vma);
  1104         } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
  1105             /* Not translatable */
  1106             return MMU_VMA_ERROR;
  1107         }
  1108     }
  1110     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1111     if( (mmucr & MMUCR_AT) == 0 ) {
  1112         return VMA_TO_EXT_ADDR(vma);
  1113     }
  1115     int entryNo = mmu_itlb_lookup_vpn( vma );
  1116     if( entryNo == -2 ) {
  1117         entryNo = mmu_itlb_lookup_vpn_asid( vma );
  1118     }
  1119     if( entryNo < 0 ) {
  1120         return MMU_VMA_ERROR;
  1121     } else {
  1122         return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
  1123         (vma & (~mmu_itlb[entryNo].mask));
  1124     }
  1125 }
  1127 void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
  1128 {
  1129     int queue = (addr&0x20)>>2;
  1130     uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
  1131     sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
  1132     sh4addr_t target = (addr&0x03FFFFE0) | hi;
  1133     ext_address_space[target>>12]->write_burst( target, src );
  1134 }
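/* Worked example for the physical (TLB-off) path above, with hypothetical values:
 * flushing SQ1 at addr = 0xE0000020 gives queue = (0x20 & 0x20) >> 2 = 8 (the
 * second half of sh4r.store_queue), selects QACR1, and with QACR1 = 0x10 the
 * 32-byte burst is written to
 * (0xE0000020 & 0x03FFFFE0) | (0x10 << 24) = 0x10000020. */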
  1136 void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc )
  1137 {
  1138     int queue = (addr&0x20)>>2;
  1139     sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
  1140     sh4addr_t target;
  1141     /* Store queue operation */
  1142     storequeue_address_space[(addr&0x03FFFFFE0)>>12]->write_burst( addr, src);
  1143 }
  1145 /********************** TLB Direct-Access Regions ***************************/
  1146 #ifdef HAVE_FRAME_ADDRESS
  1147 #define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
  1148 #else
  1149 #define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
  1150 #endif
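/* EXCEPTION_EXIT depends on the caller having passed the address of its
 * exception-exit stub in 'exc': with HAVE_FRAME_ADDRESS the macro patches the
 * saved return address in the current stack frame, so returning from the handler
 * lands in that stub instead of resuming after the faulting access; without it,
 * the slower sh4_core_exit(CORE_EXIT_EXCEPTION) path is taken. */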
  1153 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
  1155 int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
  1156 {
  1157     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1158     return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
  1159 }
  1161 void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
  1162 {
  1163     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1164     ent->vpn = val & 0xFFFFFC00;
  1165     ent->asid = val & 0x000000FF;
  1166     ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
  1167 }
  1169 int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
  1170 {
  1171     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1172     return (ent->ppn & 0x1FFFFC00) | ent->flags;
  1173 }
  1175 void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
  1176 {
  1177     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1178     ent->ppn = val & 0x1FFFFC00;
  1179     ent->flags = val & 0x00001DA;
  1180     ent->mask = get_tlb_size_mask(val);
  1181     if( ent->ppn >= 0x1C000000 )
  1182         ent->ppn |= 0xE0000000;
  1183 }
  1185 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
  1186 #define UTLB_ASSOC(addr) (addr&0x80)
  1187 #define UTLB_DATA2(addr) (addr&0x00800000)
  1189 int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
  1190 {
  1191     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1192     return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
  1193     ((ent->flags & TLB_DIRTY)<<7);
  1194 }
  1195 int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
  1196 {
  1197     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1198     if( UTLB_DATA2(addr) ) {
  1199         return ent->pcmcia;
  1200     } else {
  1201         return (ent->ppn&0x1FFFFC00) | ent->flags;
  1202     }
  1203 }
  1205 /**
  1206  * Find a UTLB entry for the associative TLB write - same as the normal
  1207  * lookup but ignores the valid bit.
  1208  */
  1209 static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
  1210 {
  1211     int result = -1;
  1212     unsigned int i;
  1213     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
  1214         if( (mmu_utlb[i].flags & TLB_VALID) &&
  1215                 ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
  1216                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
  1217             if( result != -1 ) {
  1218                 fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
  1219                 return -2;
  1220             }
  1221             result = i;
  1222         }
  1223     }
  1224     return result;
  1225 }
  1227 /**
  1228  * Find a ITLB entry for the associative TLB write - same as the normal
  1229  * lookup but ignores the valid bit.
  1230  */
  1231 static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
  1232 {
  1233     int result = -1;
  1234     unsigned int i;
  1235     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1236         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1237                 ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
  1238                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1239             if( result != -1 ) {
  1240                 return -2;
  1241             }
  1242             result = i;
  1243         }
  1244     }
  1245     return result;
  1246 }
  1248 void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
  1249 {
  1250     if( UTLB_ASSOC(addr) ) {
  1251         int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
  1252         if( utlb >= 0 ) {
  1253             struct utlb_entry *ent = &mmu_utlb[utlb];
  1254             uint32_t old_flags = ent->flags;
  1255             ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
  1256             ent->flags |= (val & TLB_VALID);
  1257             ent->flags |= ((val & 0x200)>>7);
  1258             if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
  1259                 if( old_flags & TLB_VALID )
  1260                     mmu_utlb_remove_entry( utlb );
  1261                 if( ent->flags & TLB_VALID )
  1262                     mmu_utlb_insert_entry( utlb );
  1263             }
  1264         }
  1266         int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
  1267         if( itlb >= 0 ) {
  1268             struct itlb_entry *ent = &mmu_itlb[itlb];
  1269             ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
  1270         }
  1272         if( itlb == -2 || utlb == -2 ) {
  1273             RAISE_TLB_MULTIHIT_ERROR(addr);
  1274             EXCEPTION_EXIT();
  1275             return;
  1276         }
  1277     } else {
  1278         struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1279         if( ent->flags & TLB_VALID ) 
  1280             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1281         ent->vpn = (val & 0xFFFFFC00);
  1282         ent->asid = (val & 0xFF);
  1283         ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
  1284         ent->flags |= (val & TLB_VALID);
  1285         ent->flags |= ((val & 0x200)>>7);
  1286         if( ent->flags & TLB_VALID ) 
  1287             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
  1288     }
  1289 }
  1291 void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
  1292 {
  1293     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1294     if( UTLB_DATA2(addr) ) {
  1295         ent->pcmcia = val & 0x0000000F;
  1296     } else {
  1297         if( ent->flags & TLB_VALID ) 
  1298             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1299         ent->ppn = (val & 0x1FFFFC00);
  1300         ent->flags = (val & 0x000001FF);
  1301         ent->mask = get_tlb_size_mask(val);
  1302         if( ent->flags & TLB_VALID ) 
  1303             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
  1304     }
  1305 }
  1307 struct mem_region_fn p4_region_itlb_addr = {
  1308         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1309         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1310         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1311         unmapped_read_burst, unmapped_write_burst };
  1312 struct mem_region_fn p4_region_itlb_data = {
  1313         mmu_itlb_data_read, mmu_itlb_data_write,
  1314         mmu_itlb_data_read, mmu_itlb_data_write,
  1315         mmu_itlb_data_read, mmu_itlb_data_write,
  1316         unmapped_read_burst, unmapped_write_burst };
  1317 struct mem_region_fn p4_region_utlb_addr = {
  1318         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1319         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1320         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1321         unmapped_read_burst, unmapped_write_burst };
  1322 struct mem_region_fn p4_region_utlb_data = {
  1323         mmu_utlb_data_read, mmu_utlb_data_write,
  1324         mmu_utlb_data_read, mmu_utlb_data_write,
  1325         mmu_utlb_data_read, mmu_utlb_data_write,
  1326         unmapped_read_burst, unmapped_write_burst };
  1328 /********************** Error regions **************************/
  1330 static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
  1331 {
  1332     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1333     EXCEPTION_EXIT();
  1334 }
  1336 static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1337 {
  1338     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1339     EXCEPTION_EXIT();
  1340 }
  1342 static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
  1343 {
  1344     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
  1345     EXCEPTION_EXIT();
  1346 }
  1348 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
  1349 {
  1350     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1351     EXCEPTION_EXIT();
  1352 }
  1354 static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1355 {
  1356     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1357     EXCEPTION_EXIT();
  1358 }
  1360 static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
  1361 {
  1362     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
  1363     EXCEPTION_EXIT();
  1364 }
  1366 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
  1367 {
  1368     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1369     EXCEPTION_EXIT();
  1370 }
  1372 static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1373 {
  1374     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1375     EXCEPTION_EXIT();
  1376 }
  1378 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
  1379 {
  1380     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
  1381     EXCEPTION_EXIT();
  1382 }
  1384 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
  1385 {
  1386     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
  1387     EXCEPTION_EXIT();
  1388 }
  1390 static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
  1391 {
  1392     MMIO_WRITE(MMU, TEA, addr);
  1393     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
  1394     sh4_raise_reset(EXC_TLB_MULTI_HIT);
  1395     EXCEPTION_EXIT();
  1396 }
  1398 static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1399 {
  1400     MMIO_WRITE(MMU, TEA, addr);
  1401     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
  1402     sh4_raise_reset(EXC_TLB_MULTI_HIT);
  1403     EXCEPTION_EXIT();
  1404 }
  1405 static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
  1406 {
  1407     MMIO_WRITE(MMU, TEA, addr);
  1408     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
  1409     sh4_raise_reset(EXC_TLB_MULTI_HIT);
  1410     EXCEPTION_EXIT();
  1411 }
  1413 /**
  1414  * Note: Per sec 4.6.4 of the SH7750 manual, SQ 
  1415  */
  1416 struct mem_region_fn mem_region_address_error = {
  1417         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1418         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1419         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1420         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write };
  1422 struct mem_region_fn mem_region_tlb_miss = {
  1423         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1424         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1425         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1426         (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write };
  1428 struct mem_region_fn mem_region_user_protected = {
  1429         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1430         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1431         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1432         (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write };
  1434 struct mem_region_fn mem_region_tlb_multihit = {
  1435         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1436         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1437         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1438         (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write };