lxdream.org :: lxdream/src/sh4/mmu.c
filename src/sh4/mmu.c
changeset 971:886e1ec8447d
prev 968:6fb1481859a4
next 973:7434ac745eff
author nkeynes
date Mon Jan 26 03:05:54 2009 +0000 (15 years ago)
permissions -rw-r--r--
last change Fix TLB access to SH4 peripheral control regions
     1 /**
     2  * $Id$
     3  *
     4  * SH4 MMU implementation based on address space page maps. This module
     5  * is responsible for all address decoding functions. 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    19 #define MODULE sh4_module
    21 #include <stdio.h>
    22 #include <assert.h>
    23 #include "sh4/sh4mmio.h"
    24 #include "sh4/sh4core.h"
    25 #include "sh4/sh4trans.h"
    26 #include "dreamcast.h"
    27 #include "mem.h"
    28 #include "mmu.h"
    30 #define RAISE_TLB_ERROR(code, vpn) sh4_raise_tlb_exception(code, vpn)
    31 #define RAISE_MEM_ERROR(code, vpn) \
    32     MMIO_WRITE(MMU, TEA, vpn); \
    33     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    34     sh4_raise_exception(code);
    35 #define RAISE_TLB_MULTIHIT_ERROR(vpn) sh4_raise_tlb_multihit(vpn)
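        /* RAISE_MEM_ERROR mirrors the hardware behaviour on a data TLB/address
         * exception: the faulting address is latched into TEA and into the VPN
         * field of PTEH before the exception itself is raised. */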
    37 /* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
    38 #define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
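        /* The test is pure pointer arithmetic: ent is a 1K wrapper entry exactly
         * when it points into the mmu_utlb_1k_pages[] array, i.e. when its index
         * relative to the start of that array is below UTLB_ENTRY_COUNT. */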
    40 /* Primary address space (used directly by SH4 cores) */
    41 mem_region_fn_t *sh4_address_space;
    42 mem_region_fn_t *sh4_user_address_space;
    44 /* Accessed from the UTLB accessor methods */
    45 uint32_t mmu_urc;
    46 uint32_t mmu_urb;
    47 static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */  
    49 /* Module globals */
    50 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
    51 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
    52 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
    53 static uint32_t mmu_lrui;
    54 static uint32_t mmu_asid; // current asid
    55 static struct utlb_default_regions *mmu_user_storequeue_regions;
    57 /* Structures for 1K page handling */
    58 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
    59 static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
    60 static int mmu_utlb_1k_free_index;
    63 /* Function prototypes */
    64 static void mmu_invalidate_tlb();
    65 static void mmu_utlb_register_all();
    66 static void mmu_utlb_remove_entry(int);
    67 static void mmu_utlb_insert_entry(int);
    68 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    69 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    70 static void mmu_set_tlb_enabled( int tlb_on );
    71 static void mmu_set_tlb_asid( uint32_t asid );
    72 static void mmu_set_storequeue_protected( int protected, int tlb_on );
    73 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
    74 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
    75 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
    76 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
    77 static void mmu_utlb_1k_init();
    78 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
    79 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
    80 static int mmu_read_urc();
    82 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
    83 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
    84 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
    85 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
    86 static uint32_t get_tlb_size_mask( uint32_t flags );
    87 static uint32_t get_tlb_size_pages( uint32_t flags );
    89 #define DEFAULT_REGIONS 0
    90 #define DEFAULT_STOREQUEUE_REGIONS 1
    91 #define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
    93 static struct utlb_default_regions mmu_default_regions[3] = {
    94         { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
    95         { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
    96         { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
    98 #define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
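        /* Three sets of default handlers: [0] for ordinary pages, [1] for the
         * store-queue area, and [2] for the store-queue area as seen by user mode
         * when MMUCR.SQMD makes it privileged-only. Each set supplies the
         * miss/protected/multihit regions used for unmapped or faulting slots;
         * IS_STOREQUEUE_PROTECTED() simply checks which set the user-mode
         * store-queue pointer currently selects. */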
   100 /*********************** Module public functions ****************************/
   102 /**
   103  * Allocate memory for the address space maps, and initialize them according
   104  * to the default (reset) values. (TLB is disabled by default)
   105  */
   107 void MMU_init()
   108 {
   109     sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   110     sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   111     mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   113     mmu_set_tlb_enabled(0);
   114     mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
   115     mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );                                
   117     /* Setup P4 tlb/cache access regions */
   118     mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   119     mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
   120     mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
   121     mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
   122     mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
   123     mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
   124     mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
   125     mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
   126     mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
   127     mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
   128     mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
   130     /* Setup P4 control region */
   131     mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
   132     mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
   133     mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
   134     mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
   135     mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
   136     mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
   137     mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
   138     mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
   139     mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
   140     mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
   141     mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
   142     mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
   143     mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
   145     register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
   146     mmu_utlb_1k_init();
    148     /* Ensure the code regions are executable (64-bit only). It might be
    149      * more portable to mmap these at runtime rather than using static decls.
   150      */
   151 #if SIZEOF_VOID_P == 8
   152     mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
   153     mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
   154 #endif
   155 }
   157 void MMU_reset()
   158 {
   159     mmio_region_MMU_write( CCR, 0 );
   160     mmio_region_MMU_write( MMUCR, 0 );
   161 }
   163 void MMU_save_state( FILE *f )
   164 {
   165     mmu_read_urc();   
   166     fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
   167     fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
   168     fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
   169     fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
   170     fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
   171     fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
   172 }
   174 int MMU_load_state( FILE *f )
   175 {
   176     if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
   177         return 1;
   178     }
   179     if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
   180         return 1;
   181     }
   182     if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
   183         return 1;
   184     }
    185     if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
   186         return 1;
   187     }
   188     if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
   189         return 1;
   190     }
   191     if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
   192         return 1;
   193     }
   195     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
   196     mmu_urc_overflow = mmu_urc >= mmu_urb;
   197     mmu_set_tlb_enabled(mmucr&MMUCR_AT);
   198     mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
   199     return 0;
   200 }
   202 /**
   203  * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
   204  * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
   205  */
   206 void MMU_ldtlb()
   207 {
   208     int urc = mmu_read_urc();
   209     if( mmu_utlb[urc].flags & TLB_VALID )
   210         mmu_utlb_remove_entry( urc );
   211     mmu_utlb[urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
   212     mmu_utlb[urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
   213     mmu_utlb[urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
   214     mmu_utlb[urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
   215     mmu_utlb[urc].pcmcia = MMIO_READ(MMU, PTEA);
   216     mmu_utlb[urc].mask = get_tlb_size_mask(mmu_utlb[urc].flags);
   217     if( mmu_utlb[urc].flags & TLB_VALID )
   218         mmu_utlb_insert_entry( urc );
   219 }
   222 MMIO_REGION_READ_FN( MMU, reg )
   223 {
   224     reg &= 0xFFF;
   225     switch( reg ) {
   226     case MMUCR:
   227         return MMIO_READ( MMU, MMUCR) | (mmu_read_urc()<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
   228     default:
   229         return MMIO_READ( MMU, reg );
   230     }
   231 }
   233 MMIO_REGION_WRITE_FN( MMU, reg, val )
   234 {
   235     uint32_t tmp;
   236     reg &= 0xFFF;
   237     switch(reg) {
   238     case SH4VER:
   239         return;
   240     case PTEH:
   241         val &= 0xFFFFFCFF;
   242         if( (val & 0xFF) != mmu_asid ) {
   243             mmu_set_tlb_asid( val&0xFF );
   244             sh4_icache.page_vma = -1; // invalidate icache as asid has changed
   245         }
   246         break;
   247     case PTEL:
   248         val &= 0x1FFFFDFF;
   249         break;
   250     case PTEA:
   251         val &= 0x0000000F;
   252         break;
   253     case TRA:
   254         val &= 0x000003FC;
   255         break;
   256     case EXPEVT:
   257     case INTEVT:
   258         val &= 0x00000FFF;
   259         break;
   260     case MMUCR:
   261         if( val & MMUCR_TI ) {
   262             mmu_invalidate_tlb();
   263         }
   264         mmu_urc = (val >> 10) & 0x3F;
   265         mmu_urb = (val >> 18) & 0x3F;
   266         if( mmu_urb == 0 ) {
   267             mmu_urb = 0x40;
   268         } else if( mmu_urc >= mmu_urb ) {
   269             mmu_urc_overflow = TRUE;
   270         }
   271         mmu_lrui = (val >> 26) & 0x3F;
   272         val &= 0x00000301;
   273         tmp = MMIO_READ( MMU, MMUCR );
   274         if( (val ^ tmp) & (MMUCR_SQMD) ) {
   275             mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
   276         }
   277         if( (val ^ tmp) & (MMUCR_AT) ) {
   278             // AT flag has changed state - flush the xlt cache as all bets
   279             // are off now. We also need to force an immediate exit from the
   280             // current block
   281             mmu_set_tlb_enabled( val & MMUCR_AT );
   282             MMIO_WRITE( MMU, MMUCR, val );
   283             sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
   284             xlat_flush_cache(); // If we're not running, flush the cache anyway
   285         }
   286         break;
   287     case CCR:
   288         CCN_set_cache_control( val );
   289         val &= 0x81A7;
   290         break;
   291     case MMUUNK1:
   292         /* Note that if the high bit is set, this appears to reset the machine.
   293          * Not emulating this behaviour yet until we know why...
   294          */
   295         val &= 0x00010007;
   296         break;
   297     case QACR0:
   298     case QACR1:
   299         val &= 0x0000001C;
   300         break;
   301     case PMCR1:
   302         PMM_write_control(0, val);
   303         val &= 0x0000C13F;
   304         break;
   305     case PMCR2:
   306         PMM_write_control(1, val);
   307         val &= 0x0000C13F;
   308         break;
   309     default:
   310         break;
   311     }
   312     MMIO_WRITE( MMU, reg, val );
   313 }
   315 /********************** 1K Page handling ***********************/
   316 /* Since we use 4K pages as our native page size, 1K pages need a bit of extra
   317  * effort to manage - we justify this on the basis that most programs won't
   318  * actually use 1K pages, so we may as well optimize for the common case.
   319  * 
   320  * Implementation uses an intermediate page entry (the utlb_1k_entry) that
   321  * redirects requests to the 'real' page entry. These are allocated on an
   322  * as-needed basis, and returned to the pool when all subpages are empty.
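         *
         * For example, mapping a 1K page at the (hypothetical) address 0x0C001400
         * allocates a wrapper for the 4K slot 0x0C001000-0x0C001FFF, copies the
         * slot's previous handlers into all four subpages, and then replaces
         * subpage 1 ( (0x0C001400 >> 10) & 3 ) with the real page entry.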
   323  */ 
   324 static void mmu_utlb_1k_init()
   325 {
   326     int i;
   327     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   328         mmu_utlb_1k_free_list[i] = i;
   329         mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
   330     }
   331     mmu_utlb_1k_free_index = 0;
   332 }
   334 static struct utlb_1k_entry *mmu_utlb_1k_alloc()
   335 {
   336     assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
   337     struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
   338     return entry;
   339 }    
   341 static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
   342 {
   343     unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
   344     assert( entryNo < UTLB_ENTRY_COUNT );
   345     assert( mmu_utlb_1k_free_index > 0 );
   346     mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
   347 }
   350 /********************** Address space maintenance *************************/
   352 /**
   353  * MMU accessor functions just increment URC - fixup here if necessary
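         * The fast paths bump mmu_urc without bounds checking; this function
         * folds it back into range whenever the architectural value is needed
         * (MMUCR reads, LDTLB, save state), handling both the modulo-URB wrap
         * and the case where software wrote URC >= URB directly.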
   354  */
   355 static int mmu_read_urc()
   356 {
   357     if( mmu_urc_overflow ) {
   358         if( mmu_urc >= 0x40 ) {
   359             mmu_urc_overflow = FALSE;
   360             mmu_urc -= 0x40;
   361             mmu_urc %= mmu_urb;
   362         }
   363     } else {
   364         mmu_urc %= mmu_urb;
   365     }
   366     return mmu_urc;
   367 }
   369 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   370 {
   371     int count = (end - start) >> 12;
   372     mem_region_fn_t *ptr = &sh4_address_space[start>>12];
   373     while( count-- > 0 ) {
   374         *ptr++ = fn;
   375     }
   376 }
   377 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   378 {
   379     int count = (end - start) >> 12;
   380     mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
   381     while( count-- > 0 ) {
   382         *ptr++ = fn;
   383     }
   384 }
   386 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
   387 {
   388     int i;
   389     if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
   390         /* TLB on */
   391         sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
   392         sh4_address_space[(page|0xA0000000)>>12] = fn;
   393         /* Scan UTLB and update any direct-referencing entries */
   394     } else {
   395         /* Direct map to U0, P0, P1, P2, P3 */
   396         for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
   397             sh4_address_space[(page|i)>>12] = fn;
   398         }
   399         for( i=0; i < 0x80000000; i+= 0x20000000 ) {
   400             sh4_user_address_space[(page|i)>>12] = fn;
   401         }
   402     }
   403     return TRUE;
   404 }
   406 static void mmu_set_tlb_enabled( int tlb_on )
   407 {
   408     mem_region_fn_t *ptr, *uptr;
   409     int i;
   411     /* Reset the storequeue area */
   413     if( tlb_on ) {
   414         mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   415         mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
   416         mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   418         /* Default SQ prefetch goes to TLB miss (?) */
   419         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
   420         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   421         mmu_utlb_register_all();
   422     } else {
   423         for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   424             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   425         }
   426         for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   427             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   428         }
   430         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   431         if( IS_STOREQUEUE_PROTECTED() ) {
   432             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
   433         } else {
   434             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   435         }
   436     }
   438 }
   440 /**
   441  * Flip the SQMD switch - this is rather expensive, so will need to be changed if
   442  * anything expects to do this frequently.
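         * When MMUCR.SQMD is set, user-mode access to the store-queue area
         * (0xE0000000-0xE3FFFFFF) raises an address error, so the user map (and
         * any TLB-mapped SQ pages) must be switched between the normal and the
         * SQMD (address-error) region sets.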
   443  */
   444 static void mmu_set_storequeue_protected( int protected, int tlb_on ) 
   445 {
   446     mem_region_fn_t nontlb_region;
   447     int i;
   449     if( protected ) {
   450         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
   451         nontlb_region = &p4_region_storequeue_sqmd;
   452     } else {
   453         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   454         nontlb_region = &p4_region_storequeue; 
   455     }
   457     if( tlb_on ) {
   458         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   459         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   460             if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
   461                 mmu_utlb_insert_entry(i);
   462             }
   463         }
   464     } else {
   465         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region ); 
   466     }
   468 }
   470 static void mmu_set_tlb_asid( uint32_t asid )
   471 {
   472     /* Scan for pages that need to be remapped */
   473     int i;
   474     if( IS_SV_ENABLED() ) {
   475         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   476             if( mmu_utlb[i].asid == mmu_asid && 
   477                 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   478                 // Matches old ASID - unmap out
   479                 if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   480                         get_tlb_size_pages(mmu_utlb[i].flags) ) )
   481                     mmu_utlb_remap_pages( FALSE, TRUE, i );
   482             }
   483         }
   484         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   485             if( mmu_utlb[i].asid == asid && 
   486                 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   487                 // Matches new ASID - map in
   488                 mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn, 
   489                         mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   490                         get_tlb_size_pages(mmu_utlb[i].flags) );
   491             }
   492         }
   493     } else {
   494         // Remap both Priv+user pages
   495         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   496             if( mmu_utlb[i].asid == mmu_asid &&
   497                 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   498                 if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   499                         get_tlb_size_pages(mmu_utlb[i].flags) ) )
   500                     mmu_utlb_remap_pages( TRUE, TRUE, i );
   501             }
   502         }
   503         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   504             if( mmu_utlb[i].asid == asid &&
   505                 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   506                 mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn, 
   507                         mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   508                         get_tlb_size_pages(mmu_utlb[i].flags) );  
   509             }
   510         }
   511     }
   513     mmu_asid = asid;
   514 }
   516 static uint32_t get_tlb_size_mask( uint32_t flags )
   517 {
   518     switch( flags & TLB_SIZE_MASK ) {
   519     case TLB_SIZE_1K: return MASK_1K;
   520     case TLB_SIZE_4K: return MASK_4K;
   521     case TLB_SIZE_64K: return MASK_64K;
   522     case TLB_SIZE_1M: return MASK_1M;
   523     default: return 0; /* Unreachable */
   524     }
   525 }
   526 static uint32_t get_tlb_size_pages( uint32_t flags )
   527 {
   528     switch( flags & TLB_SIZE_MASK ) {
   529     case TLB_SIZE_1K: return 0;
   530     case TLB_SIZE_4K: return 1;
   531     case TLB_SIZE_64K: return 16;
   532     case TLB_SIZE_1M: return 256;
   533     default: return 0; /* Unreachable */
   534     }
   535 }
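        /* Note that a 1K page reports 0 here: it is smaller than the 4K
         * granularity of the address-space tables, so mmu_utlb_map_pages() and
         * mmu_utlb_unmap_pages() treat npages == 0 as a single 1K sub-page and
         * route it through the utlb_1k_entry machinery instead. */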
   537 /**
   538  * Add a new TLB entry mapping to the address space table. If any of the pages
   539  * are already mapped, they are mapped to the TLB multi-hit page instead.
   540  * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
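         * npages is the number of 4K slots covered (from get_tlb_size_pages());
         * npages == 0 denotes a single 1K page handled via a utlb_1k_entry.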
   541  */ 
   542 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
   543 {
   544     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   545     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   546     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   547     struct utlb_default_regions *userdefs = privdefs;    
   549     gboolean mapping_ok = TRUE;
   550     int i;
   552     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   553         /* Storequeue mapping */
   554         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   555         userdefs = mmu_user_storequeue_regions;
   556     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   557         user_page = NULL; /* No user access to P3 region */
   558     } else if( start_addr >= 0x80000000 ) {
   559         return TRUE; // No mapping - legal but meaningless
   560     }
   562     if( npages == 0 ) {
   563         struct utlb_1k_entry *ent;
   564         int i, idx = (start_addr >> 10) & 0x03;
   565         if( IS_1K_PAGE_ENTRY(*ptr) ) {
   566             ent = (struct utlb_1k_entry *)*ptr;
   567         } else {
   568             ent = mmu_utlb_1k_alloc();
   569             /* New 1K struct - init to previous contents of region */
   570             for( i=0; i<4; i++ ) {
   571                 ent->subpages[i] = *ptr;
   572                 ent->user_subpages[i] = *uptr;
   573             }
   574             *ptr = &ent->fn;
   575             *uptr = &ent->user_fn;
   576         }
   578         if( priv_page != NULL ) {
   579             if( ent->subpages[idx] == privdefs->tlb_miss ) {
   580                 ent->subpages[idx] = priv_page;
   581             } else {
   582                 mapping_ok = FALSE;
   583                 ent->subpages[idx] = privdefs->tlb_multihit;
   584             }
   585         }
   586         if( user_page != NULL ) {
   587             if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
   588                 ent->user_subpages[idx] = user_page;
   589             } else {
   590                 mapping_ok = FALSE;
   591                 ent->user_subpages[idx] = userdefs->tlb_multihit;
   592             }
   593         }
   595     } else {
   596         if( priv_page != NULL ) {
   597             /* Privileged mapping only */
   598             for( i=0; i<npages; i++ ) {
   599                 if( *ptr == privdefs->tlb_miss ) {
   600                     *ptr++ = priv_page;
   601                 } else {
   602                     mapping_ok = FALSE;
   603                     *ptr++ = privdefs->tlb_multihit;
   604                 }
   605             }
   606         }
   607         if( user_page != NULL ) {
   608             /* User mapping only (eg ASID change remap w/ SV=1) */
   609             for( i=0; i<npages; i++ ) {
   610                 if( *uptr == userdefs->tlb_miss ) {
   611                     *uptr++ = user_page;
   612                 } else {
   613                     mapping_ok = FALSE;
   614                     *uptr++ = userdefs->tlb_multihit;
   615                 }
   616             }        
   617         }
   618     }
   620     return mapping_ok;
   621 }
   623 /**
   624  * Remap any pages within the region covered by entryNo, but not including 
   625  * entryNo itself. This is used to reestablish pages that were previously
   626  * covered by a multi-hit exception region when one of the pages is removed.
   627  */
   628 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
   629 {
   630     int mask = mmu_utlb[entryNo].mask;
   631     uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
   632     int i;
   634     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   635         if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
   636             /* Overlapping region */
   637             mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
    638             mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
   639             uint32_t start_addr;
   640             int npages;
   642             if( mmu_utlb[i].mask >= mask ) {
   643                 /* entry is no larger than the area we're replacing - map completely */
   644                 start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
   645                 npages = get_tlb_size_pages( mmu_utlb[i].flags );
   646             } else {
   647                 /* Otherwise map subset - region covered by removed page */
   648                 start_addr = remap_addr;
   649                 npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
   650             }
   652             if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) { 
   653                 mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
   654             } else if( IS_SV_ENABLED() ) {
   655                 mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
   656             }
   658         }
   659     }
   660 }
   662 /**
    663  * Remove a previous TLB mapping (replacing its pages with the TLB miss region).
   664  * @return FALSE if any pages were previously mapped to the TLB multihit page, 
   665  * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
   666  */
   667 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
   668 {
   669     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   670     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   671     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   672     struct utlb_default_regions *userdefs = privdefs;
   674     gboolean unmapping_ok = TRUE;
   675     int i;
   677     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   678         /* Storequeue mapping */
   679         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   680         userdefs = mmu_user_storequeue_regions;
   681     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   682         unmap_user = FALSE;
   683     } else if( start_addr >= 0x80000000 ) {
   684         return TRUE; // No mapping - legal but meaningless
   685     }
   687     if( npages == 0 ) { // 1K page
   688         assert( IS_1K_PAGE_ENTRY( *ptr ) );
   689         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
   690         int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
   691         if( ent->subpages[idx] == privdefs->tlb_multihit ) {
   692             unmapping_ok = FALSE;
   693         }
   694         if( unmap_priv )
   695             ent->subpages[idx] = privdefs->tlb_miss;
   696         if( unmap_user )
   697             ent->user_subpages[idx] = userdefs->tlb_miss;
   699         /* If all 4 subpages have the same content, merge them together and
   700          * release the 1K entry
   701          */
   702         mem_region_fn_t priv_page = ent->subpages[0];
   703         mem_region_fn_t user_page = ent->user_subpages[0];
   704         for( i=1; i<4; i++ ) {
   705             if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
   706                 mergeable = 0;
   707                 break;
   708             }
   709         }
   710         if( mergeable ) {
   711             mmu_utlb_1k_free(ent);
   712             *ptr = priv_page;
   713             *uptr = user_page;
   714         }
   715     } else {
   716         if( unmap_priv ) {
   717             /* Privileged (un)mapping */
   718             for( i=0; i<npages; i++ ) {
   719                 if( *ptr == privdefs->tlb_multihit ) {
   720                     unmapping_ok = FALSE;
   721                 }
   722                 *ptr++ = privdefs->tlb_miss;
   723             }
   724         }
   725         if( unmap_user ) {
   726             /* User (un)mapping */
   727             for( i=0; i<npages; i++ ) {
   728                 if( *uptr == userdefs->tlb_multihit ) {
   729                     unmapping_ok = FALSE;
   730                 }
   731                 *uptr++ = userdefs->tlb_miss;
   732             }            
   733         }
   734     }
   736     return unmapping_ok;
   737 }
   739 static void mmu_utlb_insert_entry( int entry )
   740 {
   741     struct utlb_entry *ent = &mmu_utlb[entry];
   742     mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
   743     mem_region_fn_t upage;
   744     sh4addr_t start_addr = ent->vpn & ent->mask;
   745     int npages = get_tlb_size_pages(ent->flags);
   747     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   748         /* Store queue mappings are a bit different - normal access is fixed to
   749          * the store queue register block, and we only map prefetches through
   750          * the TLB 
   751          */
   752         mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
   754         if( (ent->flags & TLB_USERMODE) == 0 ) {
   755             upage = mmu_user_storequeue_regions->tlb_prot;
   756         } else if( IS_STOREQUEUE_PROTECTED() ) {
   757             upage = &p4_region_storequeue_sqmd;
   758         } else {
   759             upage = page;
   760         }
   762     }  else {
   764         if( (ent->flags & TLB_USERMODE) == 0 ) {
   765             upage = &mem_region_tlb_protected;
   766         } else {        
   767             upage = page;
   768         }
   770         if( (ent->flags & TLB_WRITABLE) == 0 ) {
   771             page->write_long = (mem_write_fn_t)tlb_protected_write;
   772             page->write_word = (mem_write_fn_t)tlb_protected_write;
   773             page->write_byte = (mem_write_fn_t)tlb_protected_write;
   774             page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
   775             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   776         } else if( (ent->flags & TLB_DIRTY) == 0 ) {
   777             page->write_long = (mem_write_fn_t)tlb_initial_write;
   778             page->write_word = (mem_write_fn_t)tlb_initial_write;
   779             page->write_byte = (mem_write_fn_t)tlb_initial_write;
   780             page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
   781             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   782         } else {
   783             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
   784         }
   785     }
   787     mmu_utlb_pages[entry].user_fn = upage;
   789     /* Is page visible? */
   790     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) { 
   791         mmu_utlb_map_pages( page, upage, start_addr, npages );
   792     } else if( IS_SV_ENABLED() ) {
   793         mmu_utlb_map_pages( page, NULL, start_addr, npages );
   794     }
   795 }
   797 static void mmu_utlb_remove_entry( int entry )
   798 {
   799     int i, j;
   800     struct utlb_entry *ent = &mmu_utlb[entry];
   801     sh4addr_t start_addr = ent->vpn&ent->mask;
   802     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   803     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   804     gboolean unmap_user;
   805     int npages = get_tlb_size_pages(ent->flags);
   807     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
   808         unmap_user = TRUE;
   809     } else if( IS_SV_ENABLED() ) {
   810         unmap_user = FALSE;
   811     } else {
   812         return; // Not mapped
   813     }
   815     gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
   817     if( !clean_unmap ) {
   818         mmu_utlb_remap_pages( TRUE, unmap_user, entry );
   819     }
   820 }
   822 static void mmu_utlb_register_all()
   823 {
   824     int i;
   825     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   826         if( mmu_utlb[i].flags & TLB_VALID ) 
   827             mmu_utlb_insert_entry( i );
   828     }
   829 }
   831 static void mmu_invalidate_tlb()
   832 {
   833     int i;
   834     for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
   835         mmu_itlb[i].flags &= (~TLB_VALID);
   836     }
   837     if( IS_TLB_ENABLED() ) {
   838         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   839             if( mmu_utlb[i].flags & TLB_VALID ) {
   840                 mmu_utlb_remove_entry( i );
   841             }
   842         }
   843     }
   844     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   845         mmu_utlb[i].flags &= (~TLB_VALID);
   846     }
   847 }
   849 /******************************************************************************/
   850 /*                        MMU TLB address translation                         */
   851 /******************************************************************************/
   853 /**
   854  * Translate a 32-bit address into a UTLB entry number. Does not check for
   855  * page protection etc.
   856  * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
   857  */
   858 int mmu_utlb_entry_for_vpn( uint32_t vpn )
   859 {
   860     mem_region_fn_t fn = sh4_address_space[vpn>>12];
   861     if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   862         return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   863     } else if( fn == &mem_region_tlb_multihit ) {
   864         return -2;
   865     } else {
   866         return -1;
   867     }
   868 }
   871 /**
   872  * Perform the actual utlb lookup w/ asid matching.
    873  * Possible outcomes are:
   874  *   0..63 Single match - good, return entry found
   875  *   -1 No match - raise a tlb data miss exception
   876  *   -2 Multiple matches - raise a multi-hit exception (reset)
   877  * @param vpn virtual address to resolve
   878  * @return the resultant UTLB entry, or an error.
   879  */
   880 static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
   881 {
   882     int result = -1;
   883     unsigned int i;
   885     mmu_urc++;
   886     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   887         mmu_urc = 0;
   888     }
   890     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   891         if( (mmu_utlb[i].flags & TLB_VALID) &&
   892                 ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
   893                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   894             if( result != -1 ) {
   895                 return -2;
   896             }
   897             result = i;
   898         }
   899     }
   900     return result;
   901 }
   903 /**
   904  * Perform the actual utlb lookup matching on vpn only
    905  * Possible outcomes are:
   906  *   0..63 Single match - good, return entry found
   907  *   -1 No match - raise a tlb data miss exception
   908  *   -2 Multiple matches - raise a multi-hit exception (reset)
   909  * @param vpn virtual address to resolve
   910  * @return the resultant UTLB entry, or an error.
   911  */
   912 static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
   913 {
   914     int result = -1;
   915     unsigned int i;
   917     mmu_urc++;
   918     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   919         mmu_urc = 0;
   920     }
   922     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   923         if( (mmu_utlb[i].flags & TLB_VALID) &&
   924                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   925             if( result != -1 ) {
   926                 return -2;
   927             }
   928             result = i;
   929         }
   930     }
   932     return result;
   933 }
   935 /**
   936  * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
   937  * @return the number (0-3) of the replaced entry.
   938  */
   939 static int inline mmu_itlb_update_from_utlb( int entryNo )
   940 {
   941     int replace;
   942     /* Determine entry to replace based on lrui */
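            /* The tests below appear to follow the LRUI decode described in the
             * SH7750 hardware manual: each of the six LRUI bits records the relative
             * order of use of one pair of ITLB entries, and an entry is selected for
             * replacement when those bits show it to be least recently used (invalid
             * entries can also be selected, as noted below). */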
   943     if( (mmu_lrui & 0x38) == 0x38 ) {
   944         replace = 0;
   945         mmu_lrui = mmu_lrui & 0x07;
   946     } else if( (mmu_lrui & 0x26) == 0x06 ) {
   947         replace = 1;
   948         mmu_lrui = (mmu_lrui & 0x19) | 0x20;
   949     } else if( (mmu_lrui & 0x15) == 0x01 ) {
   950         replace = 2;
   951         mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
   952     } else { // Note - gets invalid entries too
   953         replace = 3;
   954         mmu_lrui = (mmu_lrui | 0x0B);
   955     }
   957     mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
   958     mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
   959     mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
   960     mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
   961     mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
   962     return replace;
   963 }
   965 /**
   966  * Perform the actual itlb lookup w/ asid protection
    967  * Possible outcomes are:
    968  *   0..3 Single match - good, return entry found
   969  *   -1 No match - raise a tlb data miss exception
   970  *   -2 Multiple matches - raise a multi-hit exception (reset)
   971  * @param vpn virtual address to resolve
   972  * @return the resultant ITLB entry, or an error.
   973  */
   974 static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
   975 {
   976     int result = -1;
   977     unsigned int i;
   979     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   980         if( (mmu_itlb[i].flags & TLB_VALID) &&
   981                 ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
   982                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   983             if( result != -1 ) {
   984                 return -2;
   985             }
   986             result = i;
   987         }
   988     }
   990     if( result == -1 ) {
   991         int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
   992         if( utlbEntry < 0 ) {
   993             return utlbEntry;
   994         } else {
   995             return mmu_itlb_update_from_utlb( utlbEntry );
   996         }
   997     }
   999     switch( result ) {
  1000     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
  1001     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
  1002     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
  1003     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   1004     }
   1006     return result;
   1007 }
  1009 /**
  1010  * Perform the actual itlb lookup on vpn only
   1011  * Possible outcomes are:
   1012  *   0..3 Single match - good, return entry found
  1013  *   -1 No match - raise a tlb data miss exception
  1014  *   -2 Multiple matches - raise a multi-hit exception (reset)
  1015  * @param vpn virtual address to resolve
  1016  * @return the resultant ITLB entry, or an error.
  1017  */
  1018 static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
   1019 {
   1020     int result = -1;
  1021     unsigned int i;
  1023     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1024         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1025                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1026             if( result != -1 ) {
  1027                 return -2;
   1028             }
   1029             result = i;
   1030         }
   1031     }
   1033     if( result == -1 ) {
  1034         int utlbEntry = mmu_utlb_lookup_vpn( vpn );
  1035         if( utlbEntry < 0 ) {
  1036             return utlbEntry;
  1037         } else {
  1038             return mmu_itlb_update_from_utlb( utlbEntry );
   1039         }
   1040     }
   1042     switch( result ) {
  1043     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
  1044     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
  1045     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
  1046     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   1047     }
   1049     return result;
   1050 }
  1052 /**
  1053  * Update the icache for an untranslated address
  1054  */
  1055 static inline void mmu_update_icache_phys( sh4addr_t addr )
   1056 {
   1057     if( (addr & 0x1C000000) == 0x0C000000 ) {
  1058         /* Main ram */
  1059         sh4_icache.page_vma = addr & 0xFF000000;
  1060         sh4_icache.page_ppa = 0x0C000000;
  1061         sh4_icache.mask = 0xFF000000;
  1062         sh4_icache.page = dc_main_ram;
  1063     } else if( (addr & 0x1FE00000) == 0 ) {
  1064         /* BIOS ROM */
  1065         sh4_icache.page_vma = addr & 0xFFE00000;
  1066         sh4_icache.page_ppa = 0;
  1067         sh4_icache.mask = 0xFFE00000;
  1068         sh4_icache.page = dc_boot_rom;
  1069     } else {
  1070         /* not supported */
   1071         sh4_icache.page_vma = -1;
   1072     }
   1073 }
  1075 /**
  1076  * Update the sh4_icache structure to describe the page(s) containing the
  1077  * given vma. If the address does not reference a RAM/ROM region, the icache
  1078  * will be invalidated instead.
  1079  * If AT is on, this method will raise TLB exceptions normally
  1080  * (hence this method should only be used immediately prior to execution of
  1081  * code), and otherwise will set the icache according to the matching TLB entry.
  1082  * If AT is off, this method will set the entire referenced RAM/ROM region in
  1083  * the icache.
  1084  * @return TRUE if the update completed (successfully or otherwise), FALSE
  1085  * if an exception was raised.
  1086  */
  1087 gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
   1088 {
   1089     int entryNo;
  1090     if( IS_SH4_PRIVMODE()  ) {
  1091         if( addr & 0x80000000 ) {
  1092             if( addr < 0xC0000000 ) {
  1093                 /* P1, P2 and P4 regions are pass-through (no translation) */
  1094                 mmu_update_icache_phys(addr);
  1095                 return TRUE;
  1096             } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
  1097                 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1098                 return FALSE;
   1099             }
   1100         }
   1102         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1103         if( (mmucr & MMUCR_AT) == 0 ) {
  1104             mmu_update_icache_phys(addr);
  1105             return TRUE;
   1106         }
   1108         if( (mmucr & MMUCR_SV) == 0 )
  1109         	entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1110         else
  1111         	entryNo = mmu_itlb_lookup_vpn( addr );
  1112     } else {
  1113         if( addr & 0x80000000 ) {
  1114             RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1115             return FALSE;
   1116         }
   1118         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1119         if( (mmucr & MMUCR_AT) == 0 ) {
  1120             mmu_update_icache_phys(addr);
  1121             return TRUE;
   1122         }
   1124         entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1126         if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
  1127             RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1128             return FALSE;
   1129         }
   1130     }
   1132     switch(entryNo) {
  1133     case -1:
  1134     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1135     return FALSE;
  1136     case -2:
  1137     RAISE_TLB_MULTIHIT_ERROR(addr);
  1138     return FALSE;
  1139     default:
  1140         sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
  1141         sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
  1142         if( sh4_icache.page == NULL ) {
  1143             sh4_icache.page_vma = -1;
  1144         } else {
  1145             sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
  1146             sh4_icache.mask = mmu_itlb[entryNo].mask;
   1147         }
   1148         return TRUE;
   1149     }
   1150 }
  1152 /**
  1153  * Translate address for disassembly purposes (ie performs an instruction
  1154  * lookup) - does not raise exceptions or modify any state, and ignores
  1155  * protection bits. Returns the translated address, or MMU_VMA_ERROR
  1156  * on translation failure.
  1157  */
  1158 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
   1159 {
   1160     if( vma & 0x80000000 ) {
  1161         if( vma < 0xC0000000 ) {
  1162             /* P1, P2 and P4 regions are pass-through (no translation) */
  1163             return VMA_TO_EXT_ADDR(vma);
  1164         } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
  1165             /* Not translatable */
  1166             return MMU_VMA_ERROR;
   1167         }
   1168     }
   1170     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1171     if( (mmucr & MMUCR_AT) == 0 ) {
  1172         return VMA_TO_EXT_ADDR(vma);
   1173     }
   1175     int entryNo = mmu_itlb_lookup_vpn( vma );
  1176     if( entryNo == -2 ) {
  1177         entryNo = mmu_itlb_lookup_vpn_asid( vma );
   1178     }
   1179     if( entryNo < 0 ) {
  1180         return MMU_VMA_ERROR;
  1181     } else {
  1182         return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
   1183         (vma & (~mmu_itlb[entryNo].mask));
   1184     }
   1185 }
  1187 /********************** TLB Direct-Access Regions ***************************/
  1188 #ifdef HAVE_FRAME_ADDRESS
  1189 #define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; } while(0)
  1190 #else
  1191 #define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
  1192 #endif
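        /* With HAVE_FRAME_ADDRESS, EXCEPTION_EXIT() appears to overwrite the word
         * above the frame pointer (the saved return address on x86) with the exc
         * argument supplied by translated code, so returning from the accessor
         * lands directly in the caller's exception-exit stub; otherwise it falls
         * back to a full sh4_core_exit(CORE_EXIT_EXCEPTION). */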
  1195 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
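        /* P4 ITLB address/data array accesses select one of the four ITLB entries
         * via address bits [8:7], which is all ITLB_ENTRY() extracts. */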
  1197 int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
   1198 {
   1199     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1200     return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
   1201 }
  1203 void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
   1204 {
   1205     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1206     ent->vpn = val & 0xFFFFFC00;
   1207     ent->asid = val & 0x000000FF;
   1208     ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
   1209 }
  1211 int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
   1212 {
   1213     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1214     return (ent->ppn & 0x1FFFFC00) | ent->flags;
   1215 }
  1217 void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
   1218 {
   1219     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1220     ent->ppn = val & 0x1FFFFC00;
   1221     ent->flags = val & 0x00001DA;
   1222     ent->mask = get_tlb_size_mask(val);
   1223     if( ent->ppn >= 0x1C000000 )
   1224         ent->ppn |= 0xE0000000;
   1225 }
  1227 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
  1228 #define UTLB_ASSOC(addr) (addr&0x80)
  1229 #define UTLB_DATA2(addr) (addr&0x00800000)
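        /* For the UTLB arrays, address bits [13:8] select one of the 64 entries,
         * bit 7 requests an associative write to the address array, and bit 23
         * selects data array 2 (the PCMCIA assist bits) instead of data array 1. */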
  1231 int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
   1232 {
   1233     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   1234     return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
   1235     ((ent->flags & TLB_DIRTY)<<7);
   1236 }
  1237 int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
   1238 {
   1239     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   1240     if( UTLB_DATA2(addr) ) {
   1241         return ent->pcmcia;
   1242     } else {
   1243         return (ent->ppn&0x1FFFFC00) | ent->flags;
   1244     }
   1245 }
  1247 /**
  1248  * Find a UTLB entry for the associative TLB write - same as the normal
  1249  * lookup but ignores the valid bit.
  1250  */
  1251 static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   1252 {
   1253     int result = -1;
  1254     unsigned int i;
  1255     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
  1256         if( (mmu_utlb[i].flags & TLB_VALID) &&
  1257                 ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
  1258                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
  1259             if( result != -1 ) {
  1260                 fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
  1261                 return -2;
   1262             }
   1263             result = i;
   1264         }
   1265     }
   1266     return result;
   1267 }
  1269 /**
  1270  * Find a ITLB entry for the associative TLB write - same as the normal
  1271  * lookup but ignores the valid bit.
  1272  */
  1273 static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   1274 {
   1275     int result = -1;
  1276     unsigned int i;
  1277     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1278         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1279                 ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
  1280                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1281             if( result != -1 ) {
  1282                 return -2;
   1283             }
   1284             result = i;
   1285         }
   1286     }
   1287     return result;
   1288 }
  1290 void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
   1291 {
   1292     if( UTLB_ASSOC(addr) ) {
  1293         int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
  1294         if( utlb >= 0 ) {
  1295             struct utlb_entry *ent = &mmu_utlb[utlb];
  1296             uint32_t old_flags = ent->flags;
  1297             ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
  1298             ent->flags |= (val & TLB_VALID);
  1299             ent->flags |= ((val & 0x200)>>7);
  1300             if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
  1301                 if( old_flags & TLB_VALID )
  1302                     mmu_utlb_remove_entry( utlb );
  1303                 if( ent->flags & TLB_VALID )
  1304                     mmu_utlb_insert_entry( utlb );
   1305             }
   1306         }
   1308         int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
  1309         if( itlb >= 0 ) {
  1310             struct itlb_entry *ent = &mmu_itlb[itlb];
  1311             ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
   1312         }
   1314         if( itlb == -2 || utlb == -2 ) {
  1315             RAISE_TLB_MULTIHIT_ERROR(addr);
  1316             EXCEPTION_EXIT();
  1317             return;
   1318         }
   1319     } else {
  1320         struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1321         if( ent->flags & TLB_VALID ) 
  1322             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1323         ent->vpn = (val & 0xFFFFFC00);
  1324         ent->asid = (val & 0xFF);
  1325         ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
  1326         ent->flags |= (val & TLB_VALID);
  1327         ent->flags |= ((val & 0x200)>>7);
  1328         if( ent->flags & TLB_VALID ) 
   1329             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   1330     }
   1331 }
  1333 void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
   1334 {
   1335     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1336     if( UTLB_DATA2(addr) ) {
  1337         ent->pcmcia = val & 0x0000000F;
  1338     } else {
  1339         if( ent->flags & TLB_VALID ) 
  1340             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1341         ent->ppn = (val & 0x1FFFFC00);
  1342         ent->flags = (val & 0x000001FF);
  1343         ent->mask = get_tlb_size_mask(val);
  1344         if( ent->flags & TLB_VALID ) 
   1345             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   1346     }
   1347 }
  1349 struct mem_region_fn p4_region_itlb_addr = {
  1350         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1351         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1352         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1353         unmapped_read_burst, unmapped_write_burst,
  1354         unmapped_prefetch };
  1355 struct mem_region_fn p4_region_itlb_data = {
  1356         mmu_itlb_data_read, mmu_itlb_data_write,
  1357         mmu_itlb_data_read, mmu_itlb_data_write,
  1358         mmu_itlb_data_read, mmu_itlb_data_write,
  1359         unmapped_read_burst, unmapped_write_burst,
  1360         unmapped_prefetch };
  1361 struct mem_region_fn p4_region_utlb_addr = {
  1362         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1363         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1364         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1365         unmapped_read_burst, unmapped_write_burst,
  1366         unmapped_prefetch };
  1367 struct mem_region_fn p4_region_utlb_data = {
  1368         mmu_utlb_data_read, mmu_utlb_data_write,
  1369         mmu_utlb_data_read, mmu_utlb_data_write,
  1370         mmu_utlb_data_read, mmu_utlb_data_write,
  1371         unmapped_read_burst, unmapped_write_burst,
  1372         unmapped_prefetch };
  1374 /********************** Error regions **************************/
  1376 static void FASTCALL address_error_read( sh4addr_t addr, void *exc ) 
   1377 {
   1378     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1379     EXCEPTION_EXIT();
   1380 }
  1382 static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc ) 
   1383 {
   1384     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1385     EXCEPTION_EXIT();
   1386 }
  1388 static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
   1389 {
   1390     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
   1391     EXCEPTION_EXIT();
   1392 }
  1394 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
   1395 {
   1396     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
   1397     EXCEPTION_EXIT();
   1398 }
  1400 static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1401 {
   1402     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
   1403     EXCEPTION_EXIT();
   1404 }
  1406 static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
   1407 {
   1408     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
   1409     EXCEPTION_EXIT();
   1410 }
  1412 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
   1413 {
   1414     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1415     EXCEPTION_EXIT();
   1416     return 0;
   1417 }
  1419 static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1420 {
   1421     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1422     EXCEPTION_EXIT();
   1423     return 0;
   1424 }
  1426 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
   1427 {
   1428     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
   1429     EXCEPTION_EXIT();
   1430 }
  1432 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
   1433 {
   1434     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
   1435     EXCEPTION_EXIT();
   1436 }
  1438 static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
   1439 {
   1440     sh4_raise_tlb_multihit(addr);
   1441     EXCEPTION_EXIT();
   1442     return 0;
   1443 }
  1445 static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1446 {
   1447     sh4_raise_tlb_multihit(addr);
   1448     EXCEPTION_EXIT();
   1449     return 0;
   1450 }
  1451 static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
   1452 {
   1453     sh4_raise_tlb_multihit(addr);
   1454     EXCEPTION_EXIT();
   1455 }
  1457 /**
  1458  * Note: Per sec 4.6.4 of the SH7750 manual, SQ 
  1459  */
  1460 struct mem_region_fn mem_region_address_error = {
  1461         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1462         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1463         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1464         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1465         unmapped_prefetch };
  1467 struct mem_region_fn mem_region_tlb_miss = {
  1468         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1469         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1470         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1471         (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
  1472         unmapped_prefetch };
  1474 struct mem_region_fn mem_region_tlb_protected = {
  1475         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1476         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1477         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1478         (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
  1479         unmapped_prefetch };
  1481 struct mem_region_fn mem_region_tlb_multihit = {
  1482         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1483         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1484         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1485         (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
  1486         (mem_prefetch_fn_t)tlb_multi_hit_read };
  1489 /* Store-queue regions */
  1490 /* These are a bit of a pain - the first 8 fields are controlled by SQMD, while 
  1491  * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
  1492  * some cases), in contrast to the ordinary fields above.
  1494  * There is probably a simpler way to do this.
  1495  */
  1497 struct mem_region_fn p4_region_storequeue = { 
  1498         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1499         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1500         unmapped_read_long, unmapped_write_long,
  1501         unmapped_read_burst, unmapped_write_burst,
  1502         ccn_storequeue_prefetch }; 
  1504 struct mem_region_fn p4_region_storequeue_miss = { 
  1505         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1506         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1507         unmapped_read_long, unmapped_write_long,
  1508         unmapped_read_burst, unmapped_write_burst,
  1509         (mem_prefetch_fn_t)tlb_miss_read }; 
  1511 struct mem_region_fn p4_region_storequeue_multihit = { 
  1512         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1513         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1514         unmapped_read_long, unmapped_write_long,
  1515         unmapped_read_burst, unmapped_write_burst,
  1516         (mem_prefetch_fn_t)tlb_multi_hit_read }; 
  1518 struct mem_region_fn p4_region_storequeue_protected = {
  1519         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1520         unmapped_read_long, unmapped_write_long,
  1521         unmapped_read_long, unmapped_write_long,
  1522         unmapped_read_burst, unmapped_write_burst,
  1523         (mem_prefetch_fn_t)tlb_protected_read };
  1525 struct mem_region_fn p4_region_storequeue_sqmd = {
  1526         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1527         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1528         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1529         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1530         (mem_prefetch_fn_t)address_error_read };        
  1532 struct mem_region_fn p4_region_storequeue_sqmd_miss = { 
  1533         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1534         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1535         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1536         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1537         (mem_prefetch_fn_t)tlb_miss_read }; 
  1539 struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
  1540         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1541         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1542         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1543         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1544         (mem_prefetch_fn_t)tlb_multi_hit_read };        
  1546 struct mem_region_fn p4_region_storequeue_sqmd_protected = {
  1547         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1548         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1549         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1550         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1551         (mem_prefetch_fn_t)tlb_protected_read };