lxdream.org :: lxdream/src/sh4/mmu.c
filename src/sh4/mmu.c
changeset 946:d41ee7994db7
prev 943:9a277733eafa
next 948:545c85cc56f1
author nkeynes
date Tue Jan 06 01:58:08 2009 +0000
branch lxdream-mem
permissions -rw-r--r--
last change Fully integrate SQ with the new address space code - added additional 'prefetch'
memory accessor. TLB is utterly untested, but non-TLB at least still works.
     1 /**
     2  * $Id$
     3  *
     4  * SH4 MMU implementation based on address space page maps. This module
     5  * is responsible for all address decoding functions. 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    19 #define MODULE sh4_module
    21 #include <stdio.h>
    22 #include <assert.h>
    23 #include "sh4/sh4mmio.h"
    24 #include "sh4/sh4core.h"
    25 #include "sh4/sh4trans.h"
    26 #include "dreamcast.h"
    27 #include "mem.h"
    28 #include "mmu.h"
    30 #define RAISE_TLB_ERROR(code, vpn) \
    31     MMIO_WRITE(MMU, TEA, vpn); \
    32     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    33     sh4_raise_tlb_exception(code);
    34 #define RAISE_MEM_ERROR(code, vpn) \
    35     MMIO_WRITE(MMU, TEA, vpn); \
    36     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    37     sh4_raise_exception(code);
    38 #define RAISE_TLB_MULTIHIT_ERROR(vpn) \
    39     sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    40     MMIO_WRITE(MMU, TEA, vpn); \
    41     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
    43 /* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
    44 #define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
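        /* Illustration (this assumes fn is the first member of struct utlb_1k_entry, so a
         * pointer stored as &ent->fn aliases the entry itself): the subtraction yields a
         * small index for pointers into mmu_utlb_1k_pages and, thanks to the unsigned
         * cast, a huge value for anything else, e.g.
         *     IS_1K_PAGE_ENTRY( &mmu_utlb_1k_pages[3].fn )   would be true
         *     IS_1K_PAGE_ENTRY( &mem_region_tlb_miss )       would be false
         */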
    46 /* Primary address space (used directly by SH4 cores) */
    47 mem_region_fn_t *sh4_address_space;
    48 mem_region_fn_t *sh4_user_address_space;
    50 /* Accessed from the UTLB accessor methods */
    51 uint32_t mmu_urc;
    52 uint32_t mmu_urb;
    54 /* Module globals */
    55 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
    56 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
    57 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
    58 static uint32_t mmu_lrui;
    59 static uint32_t mmu_asid; // current asid
    60 static struct utlb_default_regions *mmu_user_storequeue_regions;
    62 /* Structures for 1K page handling */
    63 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
    64 static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
    65 static int mmu_utlb_1k_free_index;
    68 /* Function prototypes */
    69 static void mmu_invalidate_tlb();
    70 static void mmu_utlb_register_all();
    71 static void mmu_utlb_remove_entry(int);
    72 static void mmu_utlb_insert_entry(int);
    73 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    74 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    75 static void mmu_set_tlb_enabled( int tlb_on );
    76 static void mmu_set_tlb_asid( uint32_t asid );
    77 static void mmu_set_storequeue_protected( int protected, int tlb_on );
    78 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
    79 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
    80 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
    81 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
    82 static void mmu_utlb_1k_init();
    83 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
    84 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
    86 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
    87 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
    88 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
    89 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
    90 static uint32_t get_tlb_size_mask( uint32_t flags );
    91 static uint32_t get_tlb_size_pages( uint32_t flags );
    93 #define DEFAULT_REGIONS 0
    94 #define DEFAULT_STOREQUEUE_REGIONS 1
    95 #define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
    97 static struct utlb_default_regions mmu_default_regions[3] = {
    98         { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
    99         { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
   100         { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
   102 #define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
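        /* Each utlb_default_regions set above supplies the fallback regions used when
         * building page mappings - one each for a TLB miss, a protection violation and a
         * multi-hit - for ordinary memory and for the store-queue area with and without
         * the MMUCR.SQMD user-access restriction.
         */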
   104 /*********************** Module public functions ****************************/
   106 /**
   107  * Allocate memory for the address space maps, and initialize them according
   108  * to the default (reset) values. (TLB is disabled by default)
   109  */
   111 void MMU_init()
   112 {
   113     sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   114     sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   115     mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   117     mmu_set_tlb_enabled(0);
   118     mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
   119     mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );                                
   121     /* Setup P4 tlb/cache access regions */
   122     mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   123     mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
   124     mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
   125     mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
   126     mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
   127     mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
   128     mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
   129     mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
   130     mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
   131     mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
   132     mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
   134     /* Setup P4 control region */
   135     mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
   136     mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
   137     mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
   138     mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
   139     mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
   140     mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
   141     mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
   142     mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
   143     mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
   144     mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
   145     mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
   146     mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
   147     mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
   149     register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
   150     mmu_utlb_1k_init();
   152     /* Ensure the code regions are executable */
   153     mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
   154     mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
   155 }
   157 void MMU_reset()
   158 {
   159     mmio_region_MMU_write( CCR, 0 );
   160     mmio_region_MMU_write( MMUCR, 0 );
   161 }
   163 void MMU_save_state( FILE *f )
   164 {
   165     fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
   166     fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
   167     fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
   168     fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
   169     fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
   170     fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
   171 }
   173 int MMU_load_state( FILE *f )
   174 {
   175     if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
   176         return 1;
   177     }
   178     if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
   179         return 1;
   180     }
   181     if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
   182         return 1;
   183     }
    184     if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
   185         return 1;
   186     }
   187     if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
   188         return 1;
   189     }
   190     if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
   191         return 1;
   192     }
   194     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
   195     mmu_set_tlb_enabled(mmucr&MMUCR_AT);
   196     mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
   197     return 0;
   198 }
   200 /**
   201  * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
   202  * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
   203  */
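        /* Illustrative sequence (example values only): with URB=0x40 and URC=5, LDTLB
         * rewrites mmu_utlb[5] from PTEH/PTEL/PTEA, unmapping the old entry first if it
         * was valid and re-registering the new one only if its V bit is set.
         */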
   204 void MMU_ldtlb()
   205 {
   206     mmu_urc %= mmu_urb;
   207     if( mmu_utlb[mmu_urc].flags & TLB_VALID )
   208         mmu_utlb_remove_entry( mmu_urc );
   209     mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
   210     mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
   211     mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
   212     mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
   213     mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
   214     mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
   215     if( mmu_utlb[mmu_urc].flags & TLB_VALID )
   216         mmu_utlb_insert_entry( mmu_urc );
   217 }
   220 MMIO_REGION_READ_FN( MMU, reg )
   221 {
   222     reg &= 0xFFF;
   223     switch( reg ) {
   224     case MMUCR:
   225         mmu_urc %= mmu_urb;
   226         return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
   227     default:
   228         return MMIO_READ( MMU, reg );
   229     }
   230 }
   232 MMIO_REGION_WRITE_FN( MMU, reg, val )
   233 {
   234     uint32_t tmp;
   235     reg &= 0xFFF;
   236     switch(reg) {
   237     case SH4VER:
   238         return;
   239     case PTEH:
   240         val &= 0xFFFFFCFF;
   241         if( (val & 0xFF) != mmu_asid ) {
   242             mmu_set_tlb_asid( val&0xFF );
   243             sh4_icache.page_vma = -1; // invalidate icache as asid has changed
   244         }
   245         break;
   246     case PTEL:
   247         val &= 0x1FFFFDFF;
   248         break;
   249     case PTEA:
   250         val &= 0x0000000F;
   251         break;
   252     case TRA:
   253         val &= 0x000003FC;
   254         break;
   255     case EXPEVT:
   256     case INTEVT:
   257         val &= 0x00000FFF;
   258         break;
   259     case MMUCR:
   260         if( val & MMUCR_TI ) {
   261             mmu_invalidate_tlb();
   262         }
   263         mmu_urc = (val >> 10) & 0x3F;
   264         mmu_urb = (val >> 18) & 0x3F;
   265         if( mmu_urb == 0 ) {
   266             mmu_urb = 0x40;
   267         }
   268         mmu_lrui = (val >> 26) & 0x3F;
   269         val &= 0x00000301;
   270         tmp = MMIO_READ( MMU, MMUCR );
   271         if( (val ^ tmp) & (MMUCR_SQMD) ) {
   272             mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
   273         }
   274         if( (val ^ tmp) & (MMUCR_AT) ) {
   275             // AT flag has changed state - flush the xlt cache as all bets
   276             // are off now. We also need to force an immediate exit from the
   277             // current block
   278             mmu_set_tlb_enabled( val & MMUCR_AT );
   279             MMIO_WRITE( MMU, MMUCR, val );
   280             sh4_flush_icache();
   281         }
   282         break;
   283     case CCR:
   284         CCN_set_cache_control( val );
   285         val &= 0x81A7;
   286         break;
   287     case MMUUNK1:
   288         /* Note that if the high bit is set, this appears to reset the machine.
   289          * Not emulating this behaviour yet until we know why...
   290          */
   291         val &= 0x00010007;
   292         break;
   293     case QACR0:
   294     case QACR1:
   295         val &= 0x0000001C;
   296         break;
   297     case PMCR1:
   298         PMM_write_control(0, val);
   299         val &= 0x0000C13F;
   300         break;
   301     case PMCR2:
   302         PMM_write_control(1, val);
   303         val &= 0x0000C13F;
   304         break;
   305     default:
   306         break;
   307     }
   308     MMIO_WRITE( MMU, reg, val );
   309 }
   311 /********************** 1K Page handling ***********************/
   312 /* Since we use 4K pages as our native page size, 1K pages need a bit of extra
   313  * effort to manage - we justify this on the basis that most programs won't
   314  * actually use 1K pages, so we may as well optimize for the common case.
   315  * 
   316  * Implementation uses an intermediate page entry (the utlb_1k_entry) that
   317  * redirects requests to the 'real' page entry. These are allocated on an
   318  * as-needed basis, and returned to the pool when all subpages are empty.
   319  */ 
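        /* Rough sketch of the indirection (illustrative only, and assuming as above that
         * fn is the first member of struct utlb_1k_entry):
         *
         *     struct utlb_1k_entry *ent =
         *         (struct utlb_1k_entry *)sh4_address_space[addr >> 12];
         *     mem_region_fn_t region = ent->subpages[(addr >> 10) & 0x03];
         *
         * i.e. the native 4K slot points at the intermediate entry, which selects one of
         * four 1K subpages by address bits 11:10.
         */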
   320 static void mmu_utlb_1k_init()
   321 {
   322     int i;
   323     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   324         mmu_utlb_1k_free_list[i] = i;
   325         mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
   326     }
   327     mmu_utlb_1k_free_index = 0;
   328 }
   330 static struct utlb_1k_entry *mmu_utlb_1k_alloc()
   331 {
   332     assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
   333     struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_index++];
   334     return entry;
   335 }    
   337 static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
   338 {
   339     unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
   340     assert( entryNo < UTLB_ENTRY_COUNT );
   341     assert( mmu_utlb_1k_free_index > 0 );
   342     mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
   343 }
   346 /********************** Address space maintenance *************************/
   348 /**
   349  * MMU accessor functions just increment URC - fixup here if necessary
   350  */
   351 static inline void mmu_urc_fixup()
   352 {
   353    mmu_urc %= mmu_urb; 
   354 }
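        /* The UTLB accessor methods referenced above only increment mmu_urc on each
         * lookup; the counter is folded back into the 0..URB-1 range lazily - this
         * helper, the MMUCR read handler and MMU_ldtlb() all apply the same modulo.
         */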
   356 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   357 {
   358     int count = (end - start) >> 12;
   359     mem_region_fn_t *ptr = &sh4_address_space[start>>12];
   360     while( count-- > 0 ) {
   361         *ptr++ = fn;
   362     }
   363 }
   364 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   365 {
   366     int count = (end - start) >> 12;
   367     mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
   368     while( count-- > 0 ) {
   369         *ptr++ = fn;
   370     }
   371 }
   373 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
   374 {
   375     int i;
   376     if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
   377         /* TLB on */
   378         sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
   379         sh4_address_space[(page|0xA0000000)>>12] = fn;
   380         /* Scan UTLB and update any direct-referencing entries */
   381     } else {
   382         /* Direct map to U0, P0, P1, P2, P3 */
   383         for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
   384             sh4_address_space[(page|i)>>12] = fn;
   385         }
   386         for( i=0; i < 0x80000000; i+= 0x20000000 ) {
   387             sh4_user_address_space[(page|i)>>12] = fn;
   388         }
   389     }
   390 }
   392 static void mmu_set_tlb_enabled( int tlb_on )
   393 {
   394     mem_region_fn_t *ptr, *uptr;
   395     int i;
   397     /* Reset the storequeue area */
   399     if( tlb_on ) {
   400         mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   401         mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
   402         mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   404         /* Default SQ prefetch goes to TLB miss (?) */
   405         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
   406         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   407         mmu_utlb_register_all();
   408     } else {
   409         for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   410             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   411         }
   412         for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   413             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   414         }
   416         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   417         if( IS_STOREQUEUE_PROTECTED() ) {
   418             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
   419         } else {
   420             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   421         }
   422     }
   424 }
   426 /**
   427  * Flip the SQMD switch - this is rather expensive, so will need to be changed if
   428  * anything expects to do this frequently.
   429  */
   430 static void mmu_set_storequeue_protected( int protected, int tlb_on ) 
   431 {
   432     mem_region_fn_t nontlb_region;
   433     int i;
   435     if( protected ) {
   436         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
   437         nontlb_region = &p4_region_storequeue_sqmd;
   438     } else {
   439         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   440         nontlb_region = &p4_region_storequeue; 
   441     }
   443     if( tlb_on ) {
   444         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   445         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   446             if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
   447                 mmu_utlb_insert_entry(i);
   448             }
   449         }
   450     } else {
   451         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region ); 
   452     }
   454 }
   456 static void mmu_set_tlb_asid( uint32_t asid )
   457 {
   458     /* Scan for pages that need to be remapped */
   459     int i;
   460     if( IS_SV_ENABLED() ) {
   461         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   462             if( mmu_utlb[i].flags & TLB_VALID ) {
   463                 if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
   464                     if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
   465                         if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   466                                 get_tlb_size_pages(mmu_utlb[i].flags) ) )
   467                             mmu_utlb_remap_pages( FALSE, TRUE, i );
   468                     } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
   469                         mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn, 
   470                                 mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   471                                 get_tlb_size_pages(mmu_utlb[i].flags) );  
   472                     }
   473                 }
   474             }
   475         }
   476     } else {
   477         // Remap both Priv+user pages
   478         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   479             if( mmu_utlb[i].flags & TLB_VALID ) {
   480                 if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
   481                     if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
   482                         if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   483                                 get_tlb_size_pages(mmu_utlb[i].flags) ) )
   484                             mmu_utlb_remap_pages( TRUE, TRUE, i );
   485                     } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
   486                         mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn, 
   487                                 mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   488                                 get_tlb_size_pages(mmu_utlb[i].flags) );  
   489                     }
   490                 }
   491             }
   492         }
   493     }
   495     mmu_asid = asid;
   496 }
   498 static uint32_t get_tlb_size_mask( uint32_t flags )
   499 {
   500     switch( flags & TLB_SIZE_MASK ) {
   501     case TLB_SIZE_1K: return MASK_1K;
   502     case TLB_SIZE_4K: return MASK_4K;
   503     case TLB_SIZE_64K: return MASK_64K;
   504     case TLB_SIZE_1M: return MASK_1M;
   505     default: return 0; /* Unreachable */
   506     }
   507 }
   508 static uint32_t get_tlb_size_pages( uint32_t flags )
   509 {
   510     switch( flags & TLB_SIZE_MASK ) {
   511     case TLB_SIZE_1K: return 0;
   512     case TLB_SIZE_4K: return 1;
   513     case TLB_SIZE_64K: return 16;
   514     case TLB_SIZE_1M: return 256;
   515     default: return 0; /* Unreachable */
   516     }
   517 }
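        /* Both helpers express sizes in units of the native 4K page: 4K -> 1 page,
         * 64K -> 16, 1M -> 256. The 1K case deliberately returns 0, which the map/unmap
         * code below treats as "use the 1K subpage path" rather than as a page count.
         */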
   519 /**
   520  * Add a new TLB entry mapping to the address space table. If any of the pages
   521  * are already mapped, they are mapped to the TLB multi-hit page instead.
   522  * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
   523  */ 
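        /* Worked example (illustrative values): a valid 64K entry with VPN 0x7F012000
         * has mask 0xFFFF0000, so mmu_utlb_insert_entry() ends up making a call roughly
         * equivalent to
         *
         *     mmu_utlb_map_pages( &mmu_utlb_pages[n].fn, mmu_utlb_pages[n].user_fn,
         *                         0x7F010000, 16 );
         *
         * i.e. sixteen consecutive 4K slots are filled starting at vpn & mask.
         */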
   524 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
   525 {
   526     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   527     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   528     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   529     struct utlb_default_regions *userdefs = privdefs;    
   531     gboolean mapping_ok = TRUE;
   532     int i;
   534     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   535         /* Storequeue mapping */
   536         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   537         userdefs = mmu_user_storequeue_regions;
   538     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   539         user_page = NULL; /* No user access to P3 region */
   540     } else if( start_addr >= 0x80000000 ) {
   541         return TRUE; // No mapping - legal but meaningless
   542     }
   544     if( npages == 0 ) {
   545         struct utlb_1k_entry *ent;
   546         int i, idx = (start_addr >> 10) & 0x03;
   547         if( IS_1K_PAGE_ENTRY(*ptr) ) {
   548             ent = (struct utlb_1k_entry *)*ptr;
   549         } else {
   550             ent = mmu_utlb_1k_alloc();
   551             /* New 1K struct - init to previous contents of region */
   552             for( i=0; i<4; i++ ) {
   553                 ent->subpages[i] = *ptr;
   554                 ent->user_subpages[i] = *uptr;
   555             }
   556             *ptr = &ent->fn;
   557             *uptr = &ent->user_fn;
   558         }
   560         if( priv_page != NULL ) {
   561             if( ent->subpages[idx] == privdefs->tlb_miss ) {
   562                 ent->subpages[idx] = priv_page;
   563             } else {
   564                 mapping_ok = FALSE;
   565                 ent->subpages[idx] = privdefs->tlb_multihit;
   566             }
   567         }
   568         if( user_page != NULL ) {
   569             if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
   570                 ent->user_subpages[idx] = user_page;
   571             } else {
   572                 mapping_ok = FALSE;
   573                 ent->user_subpages[idx] = userdefs->tlb_multihit;
   574             }
   575         }
   577     } else {
   578         if( priv_page != NULL ) {
   579             /* Privileged mapping only */
   580             for( i=0; i<npages; i++ ) {
   581                 if( *ptr == privdefs->tlb_miss ) {
   582                     *ptr++ = priv_page;
   583                 } else {
   584                     mapping_ok = FALSE;
   585                     *ptr++ = privdefs->tlb_multihit;
   586                 }
   587             }
   588         }
   589         if( user_page != NULL ) {
   590             /* User mapping only (eg ASID change remap w/ SV=1) */
   591             for( i=0; i<npages; i++ ) {
   592                 if( *uptr == userdefs->tlb_miss ) {
   593                     *uptr++ = user_page;
   594                 } else {
   595                     mapping_ok = FALSE;
   596                     *uptr++ = userdefs->tlb_multihit;
   597                 }
   598             }        
   599         }
   600     }
   602     return mapping_ok;
   603 }
   605 /**
   606  * Remap any pages within the region covered by entryNo, but not including 
   607  * entryNo itself. This is used to reestablish pages that were previously
   608  * covered by a multi-hit exception region when one of the pages is removed.
   609  */
   610 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
   611 {
   612     int mask = mmu_utlb[entryNo].mask;
   613     uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
   614     int i;
   616     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   617         if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
   618             /* Overlapping region */
   619             mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
    620             mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
   621             uint32_t start_addr;
   622             int npages;
   624             if( mmu_utlb[i].mask >= mask ) {
   625                 /* entry is no larger than the area we're replacing - map completely */
   626                 start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
   627                 npages = get_tlb_size_pages( mmu_utlb[i].flags );
   628             } else {
   629                 /* Otherwise map subset - region covered by removed page */
   630                 start_addr = remap_addr;
   631                 npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
   632             }
   634             if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) { 
   635                 mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
   636             } else if( IS_SV_ENABLED() ) {
   637                 mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
   638             }
   640         }
   641     }
   642 }
   644 /**
    645  * Remove a previous TLB mapping (replacing the affected pages with the TLB miss region).
   646  * @return FALSE if any pages were previously mapped to the TLB multihit page, 
   647  * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
   648  */
   649 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
   650 {
   651     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   652     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   653     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   654     struct utlb_default_regions *userdefs = privdefs;
   656     gboolean unmapping_ok = TRUE;
   657     int i;
   659     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   660         /* Storequeue mapping */
   661         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   662         userdefs = mmu_user_storequeue_regions;
   663     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   664         unmap_user = FALSE;
   665     } else if( start_addr >= 0x80000000 ) {
   666         return TRUE; // No mapping - legal but meaningless
   667     }
   669     if( npages == 0 ) { // 1K page
   670         assert( IS_1K_PAGE_ENTRY( *ptr ) );
   671         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
   672         int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
   673         if( ent->subpages[idx] == privdefs->tlb_multihit ) {
   674             unmapping_ok = FALSE;
   675         }
   676         if( unmap_priv )
   677             ent->subpages[idx] = privdefs->tlb_miss;
   678         if( unmap_user )
   679             ent->user_subpages[idx] = userdefs->tlb_miss;
   681         /* If all 4 subpages have the same content, merge them together and
   682          * release the 1K entry
   683          */
   684         mem_region_fn_t priv_page = ent->subpages[0];
   685         mem_region_fn_t user_page = ent->user_subpages[0];
   686         for( i=1; i<4; i++ ) {
   687             if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
   688                 mergeable = 0;
   689                 break;
   690             }
   691         }
   692         if( mergeable ) {
   693             mmu_utlb_1k_free(ent);
   694             *ptr = priv_page;
   695             *uptr = user_page;
   696         }
   697     } else {
   698         if( unmap_priv ) {
   699             /* Privileged (un)mapping */
   700             for( i=0; i<npages; i++ ) {
   701                 if( *ptr == privdefs->tlb_multihit ) {
   702                     unmapping_ok = FALSE;
   703                 }
   704                 *ptr++ = privdefs->tlb_miss;
   705             }
   706         }
   707         if( unmap_user ) {
   708             /* User (un)mapping */
   709             for( i=0; i<npages; i++ ) {
   710                 if( *uptr == userdefs->tlb_multihit ) {
   711                     unmapping_ok = FALSE;
   712                 }
   713                 *uptr++ = userdefs->tlb_miss;
   714             }            
   715         }
   716     }
   718     return unmapping_ok;
   719 }
   721 static void mmu_utlb_insert_entry( int entry )
   722 {
   723     struct utlb_entry *ent = &mmu_utlb[entry];
   724     mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
   725     mem_region_fn_t upage;
   726     sh4addr_t start_addr = ent->vpn & ent->mask;
   727     int npages = get_tlb_size_pages(ent->flags);
   729     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   730         /* Store queue mappings are a bit different - normal access is fixed to
   731          * the store queue register block, and we only map prefetches through
   732          * the TLB 
   733          */
   734         mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
   736         if( (ent->flags & TLB_USERMODE) == 0 ) {
   737             upage = mmu_user_storequeue_regions->tlb_prot;
   738         } else if( IS_STOREQUEUE_PROTECTED() ) {
   739             upage = &p4_region_storequeue_sqmd;
   740         } else {
   741             upage = page;
   742         }
   744     }  else {
   746         if( (ent->flags & TLB_USERMODE) == 0 ) {
   747             upage = &mem_region_tlb_protected;
   748         } else {        
   749             upage = page;
   750         }
   752         if( (ent->flags & TLB_WRITABLE) == 0 ) {
   753             page->write_long = (mem_write_fn_t)tlb_protected_write;
   754             page->write_word = (mem_write_fn_t)tlb_protected_write;
   755             page->write_byte = (mem_write_fn_t)tlb_protected_write;
   756             page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
   757             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   758         } else if( (ent->flags & TLB_DIRTY) == 0 ) {
   759             page->write_long = (mem_write_fn_t)tlb_initial_write;
   760             page->write_word = (mem_write_fn_t)tlb_initial_write;
   761             page->write_byte = (mem_write_fn_t)tlb_initial_write;
   762             page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
   763             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   764         } else {
   765             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
   766         }
   767     }
   769     mmu_utlb_pages[entry].user_fn = upage;
   771     /* Is page visible? */
   772     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) { 
   773         mmu_utlb_map_pages( page, upage, start_addr, npages );
   774     } else if( IS_SV_ENABLED() ) {
   775         mmu_utlb_map_pages( page, NULL, start_addr, npages );
   776     }
   777 }
   779 static void mmu_utlb_remove_entry( int entry )
   780 {
   781     int i, j;
   782     struct utlb_entry *ent = &mmu_utlb[entry];
   783     sh4addr_t start_addr = ent->vpn&ent->mask;
   784     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   785     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   786     gboolean unmap_user;
   787     int npages = get_tlb_size_pages(ent->flags);
   789     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
   790         unmap_user = TRUE;
   791     } else if( IS_SV_ENABLED() ) {
   792         unmap_user = FALSE;
   793     } else {
   794         return; // Not mapped
   795     }
   797     gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
   799     if( !clean_unmap ) {
   800         mmu_utlb_remap_pages( TRUE, unmap_user, entry );
   801     }
   802 }
   804 static void mmu_utlb_register_all()
   805 {
   806     int i;
   807     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   808         if( mmu_utlb[i].flags & TLB_VALID ) 
   809             mmu_utlb_insert_entry( i );
   810     }
   811 }
   813 static void mmu_invalidate_tlb()
   814 {
   815     int i;
   816     for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
   817         mmu_itlb[i].flags &= (~TLB_VALID);
   818     }
   819     if( IS_TLB_ENABLED() ) {
   820         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   821             if( mmu_utlb[i].flags & TLB_VALID ) {
   822                 mmu_utlb_remove_entry( i );
   823             }
   824         }
   825     }
   826     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   827         mmu_utlb[i].flags &= (~TLB_VALID);
   828     }
   829 }
   831 /******************************************************************************/
   832 /*                        MMU TLB address translation                         */
   833 /******************************************************************************/
   835 /**
   836  * Translate a 32-bit address into a UTLB entry number. Does not check for
   837  * page protection etc.
   838  * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
   839  */
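        /* This relies on every directly-mapped page's region function pointing into
         * mmu_utlb_pages[], so the entry number falls out of pointer arithmetic rather
         * than a search; pages routed through the 1K subpage entries point elsewhere and
         * so show up as -1 here.
         */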
   840 int mmu_utlb_entry_for_vpn( uint32_t vpn )
   841 {
   842     mem_region_fn_t fn = sh4_address_space[vpn>>12];
   843     if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   844         return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   845     } else if( fn == &mem_region_tlb_multihit ) {
   846         return -2;
   847     } else {
   848         return -1;
   849     }
   850 }
   853 /**
   854  * Perform the actual utlb lookup w/ asid matching.
    855  * Possible outcomes are:
   856  *   0..63 Single match - good, return entry found
   857  *   -1 No match - raise a tlb data miss exception
   858  *   -2 Multiple matches - raise a multi-hit exception (reset)
   859  * @param vpn virtual address to resolve
   860  * @return the resultant UTLB entry, or an error.
   861  */
   862 static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
   863 {
   864     int result = -1;
   865     unsigned int i;
   867     mmu_urc++;
   868     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   869         mmu_urc = 0;
   870     }
   872     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   873         if( (mmu_utlb[i].flags & TLB_VALID) &&
   874                 ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
   875                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   876             if( result != -1 ) {
   877                 return -2;
   878             }
   879             result = i;
   880         }
   881     }
   882     return result;
   883 }
   885 /**
   886  * Perform the actual utlb lookup matching on vpn only
    887  * Possible outcomes are:
   888  *   0..63 Single match - good, return entry found
   889  *   -1 No match - raise a tlb data miss exception
   890  *   -2 Multiple matches - raise a multi-hit exception (reset)
   891  * @param vpn virtual address to resolve
   892  * @return the resultant UTLB entry, or an error.
   893  */
   894 static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
   895 {
   896     int result = -1;
   897     unsigned int i;
   899     mmu_urc++;
   900     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   901         mmu_urc = 0;
   902     }
   904     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   905         if( (mmu_utlb[i].flags & TLB_VALID) &&
   906                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   907             if( result != -1 ) {
   908                 return -2;
   909             }
   910             result = i;
   911         }
   912     }
   914     return result;
   915 }
   917 /**
   918  * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
   919  * @return the number (0-3) of the replaced entry.
   920  */
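        /* The replacement choice below follows the SH7750 LRUI scheme: each LRUI bit
         * records the relative age of one pair of ITLB entries, so approximately
         *     (lrui & 0x38) == 0x38  -> replace entry 0
         *     (lrui & 0x26) == 0x06  -> replace entry 1
         *     (lrui & 0x15) == 0x01  -> replace entry 2
         *     otherwise              -> replace entry 3 (also catches invalid encodings)
         */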
   921 static int inline mmu_itlb_update_from_utlb( int entryNo )
   922 {
   923     int replace;
   924     /* Determine entry to replace based on lrui */
   925     if( (mmu_lrui & 0x38) == 0x38 ) {
   926         replace = 0;
   927         mmu_lrui = mmu_lrui & 0x07;
   928     } else if( (mmu_lrui & 0x26) == 0x06 ) {
   929         replace = 1;
   930         mmu_lrui = (mmu_lrui & 0x19) | 0x20;
   931     } else if( (mmu_lrui & 0x15) == 0x01 ) {
   932         replace = 2;
   933         mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
   934     } else { // Note - gets invalid entries too
   935         replace = 3;
   936         mmu_lrui = (mmu_lrui | 0x0B);
   937     }
   939     mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
   940     mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
   941     mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
   942     mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
   943     mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
   944     return replace;
   945 }
   947 /**
   948  * Perform the actual itlb lookup w/ asid protection
    949  * Possible outcomes are:
    950  *   0..3 Single match - good, return entry found
   951  *   -1 No match - raise a tlb data miss exception
   952  *   -2 Multiple matches - raise a multi-hit exception (reset)
   953  * @param vpn virtual address to resolve
   954  * @return the resultant ITLB entry, or an error.
   955  */
   956 static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
   957 {
   958     int result = -1;
   959     unsigned int i;
   961     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   962         if( (mmu_itlb[i].flags & TLB_VALID) &&
   963                 ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
   964                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   965             if( result != -1 ) {
   966                 return -2;
   967             }
   968             result = i;
   969         }
   970     }
   972     if( result == -1 ) {
   973         int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
   974         if( utlbEntry < 0 ) {
   975             return utlbEntry;
   976         } else {
   977             return mmu_itlb_update_from_utlb( utlbEntry );
   978         }
   979     }
   981     switch( result ) {
   982     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
   983     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
   984     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
   985     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   986     }
   988     return result;
   989 }
   991 /**
   992  * Perform the actual itlb lookup on vpn only
    993  * Possible outcomes are:
    994  *   0..3 Single match - good, return entry found
   995  *   -1 No match - raise a tlb data miss exception
   996  *   -2 Multiple matches - raise a multi-hit exception (reset)
   997  * @param vpn virtual address to resolve
   998  * @return the resultant ITLB entry, or an error.
   999  */
   1000 static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
   1001 {
  1002     int result = -1;
  1003     unsigned int i;
  1005     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1006         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1007                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1008             if( result != -1 ) {
   1009                 return -2;
   1010             }
   1011             result = i;
   1012         }
   1013     }
  1015     if( result == -1 ) {
  1016         int utlbEntry = mmu_utlb_lookup_vpn( vpn );
  1017         if( utlbEntry < 0 ) {
  1018             return utlbEntry;
  1019         } else {
   1020             return mmu_itlb_update_from_utlb( utlbEntry );
   1021         }
   1022     }
  1024     switch( result ) {
  1025     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
  1026     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
  1027     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
   1028     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   1029     }
   1031     return result;
   1032 }
  1034 /**
  1035  * Update the icache for an untranslated address
  1036  */
   1037 static inline void mmu_update_icache_phys( sh4addr_t addr )
   1038 {
  1039     if( (addr & 0x1C000000) == 0x0C000000 ) {
  1040         /* Main ram */
  1041         sh4_icache.page_vma = addr & 0xFF000000;
  1042         sh4_icache.page_ppa = 0x0C000000;
  1043         sh4_icache.mask = 0xFF000000;
  1044         sh4_icache.page = dc_main_ram;
  1045     } else if( (addr & 0x1FE00000) == 0 ) {
  1046         /* BIOS ROM */
  1047         sh4_icache.page_vma = addr & 0xFFE00000;
  1048         sh4_icache.page_ppa = 0;
  1049         sh4_icache.mask = 0xFFE00000;
  1050         sh4_icache.page = dc_boot_rom;
  1051     } else {
  1052         /* not supported */
   1053         sh4_icache.page_vma = -1;
   1054     }
   1055 }
  1057 /**
  1058  * Update the sh4_icache structure to describe the page(s) containing the
  1059  * given vma. If the address does not reference a RAM/ROM region, the icache
  1060  * will be invalidated instead.
  1061  * If AT is on, this method will raise TLB exceptions normally
  1062  * (hence this method should only be used immediately prior to execution of
  1063  * code), and otherwise will set the icache according to the matching TLB entry.
  1064  * If AT is off, this method will set the entire referenced RAM/ROM region in
  1065  * the icache.
  1066  * @return TRUE if the update completed (successfully or otherwise), FALSE
  1067  * if an exception was raised.
  1068  */
   1069 gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
   1070 {
  1071     int entryNo;
  1072     if( IS_SH4_PRIVMODE()  ) {
  1073         if( addr & 0x80000000 ) {
  1074             if( addr < 0xC0000000 ) {
  1075                 /* P1, P2 and P4 regions are pass-through (no translation) */
  1076                 mmu_update_icache_phys(addr);
  1077                 return TRUE;
  1078             } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
  1079                 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1080                 return FALSE;
   1081             }
   1082         }
  1084         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1085         if( (mmucr & MMUCR_AT) == 0 ) {
  1086             mmu_update_icache_phys(addr);
   1087             return TRUE;
   1088         }
  1090         if( (mmucr & MMUCR_SV) == 0 )
  1091         	entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1092         else
  1093         	entryNo = mmu_itlb_lookup_vpn( addr );
  1094     } else {
  1095         if( addr & 0x80000000 ) {
  1096             RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1097             return FALSE;
   1098         }
  1100         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1101         if( (mmucr & MMUCR_AT) == 0 ) {
  1102             mmu_update_icache_phys(addr);
   1103             return TRUE;
   1104         }
  1106         entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1108         if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
  1109             RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1110             return FALSE;
   1111         }
   1112     }
  1114     switch(entryNo) {
  1115     case -1:
  1116     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1117     return FALSE;
  1118     case -2:
  1119     RAISE_TLB_MULTIHIT_ERROR(addr);
  1120     return FALSE;
  1121     default:
  1122         sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
  1123         sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
  1124         if( sh4_icache.page == NULL ) {
  1125             sh4_icache.page_vma = -1;
  1126         } else {
  1127             sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
   1128             sh4_icache.mask = mmu_itlb[entryNo].mask;
   1129         }
   1130         return TRUE;
   1131     }
   1132 }
  1134 /**
  1135  * Translate address for disassembly purposes (ie performs an instruction
  1136  * lookup) - does not raise exceptions or modify any state, and ignores
  1137  * protection bits. Returns the translated address, or MMU_VMA_ERROR
  1138  * on translation failure.
  1139  */
   1140 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
   1141 {
  1142     if( vma & 0x80000000 ) {
  1143         if( vma < 0xC0000000 ) {
  1144             /* P1, P2 and P4 regions are pass-through (no translation) */
  1145             return VMA_TO_EXT_ADDR(vma);
  1146         } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
  1147             /* Not translatable */
   1148             return MMU_VMA_ERROR;
   1149         }
   1150     }
  1152     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1153     if( (mmucr & MMUCR_AT) == 0 ) {
   1154         return VMA_TO_EXT_ADDR(vma);
   1155     }
  1157     int entryNo = mmu_itlb_lookup_vpn( vma );
  1158     if( entryNo == -2 ) {
   1159         entryNo = mmu_itlb_lookup_vpn_asid( vma );
   1160     }
  1161     if( entryNo < 0 ) {
  1162         return MMU_VMA_ERROR;
  1163     } else {
  1164         return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
   1165         (vma & (~mmu_itlb[entryNo].mask));
   1166     }
   1167 }
  1169 /********************** TLB Direct-Access Regions ***************************/
  1170 #ifdef HAVE_FRAME_ADDRESS
  1171 #define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
  1172 #else
  1173 #define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
  1174 #endif
  1177 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
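        /* P4 ITLB address/data array accesses select one of the four entries from
         * address bits 8:7 - so, purely as an example, 0xF2000080 names ITLB entry 1.
         */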
   1179 int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
   1180 {
   1181     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1182     return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
   1183 }
   1185 void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
   1186 {
   1187     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1188     ent->vpn = val & 0xFFFFFC00;
   1189     ent->asid = val & 0x000000FF;
   1190     ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
   1191 }
   1193 int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
   1194 {
   1195     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1196     return (ent->ppn & 0x1FFFFC00) | ent->flags;
   1197 }
   1199 void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
   1200 {
   1201     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1202     ent->ppn = val & 0x1FFFFC00;
   1203     ent->flags = val & 0x00001DA;
   1204     ent->mask = get_tlb_size_mask(val);
   1205     if( ent->ppn >= 0x1C000000 )
   1206         ent->ppn |= 0xE0000000;
   1207 }
  1209 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
  1210 #define UTLB_ASSOC(addr) (addr&0x80)
  1211 #define UTLB_DATA2(addr) (addr&0x00800000)
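        /* For the UTLB arrays the entry number comes from address bits 13:8, bit 7
         * selects associative behaviour on writes, and bit 23 picks data array 2 (the
         * PCMCIA/PTEA bits) - e.g. a write to 0xF6000380 is an associative access that
         * names entry 3 (example address only).
         */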
   1213 int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
   1214 {
   1215     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   1216     return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
   1217     ((ent->flags & TLB_DIRTY)<<7);
   1218 }
   1219 int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
   1220 {
   1221     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   1222     if( UTLB_DATA2(addr) ) {
   1223         return ent->pcmcia;
   1224     } else {
   1225         return (ent->ppn&0x1FFFFC00) | ent->flags;
   1226     }
   1227 }
  1229 /**
  1230  * Find a UTLB entry for the associative TLB write - same as the normal
  1231  * lookup but ignores the valid bit.
  1232  */
   1233 static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   1234 {
  1235     int result = -1;
  1236     unsigned int i;
  1237     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
  1238         if( (mmu_utlb[i].flags & TLB_VALID) &&
  1239                 ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
  1240                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
  1241             if( result != -1 ) {
  1242                 fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
   1243                 return -2;
   1244             }
   1245             result = i;
   1246         }
   1247     }
   1248     return result;
   1249 }
  1251 /**
  1252  * Find a ITLB entry for the associative TLB write - same as the normal
  1253  * lookup but ignores the valid bit.
  1254  */
   1255 static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   1256 {
  1257     int result = -1;
  1258     unsigned int i;
  1259     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1260         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1261                 ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
  1262                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1263             if( result != -1 ) {
   1264                 return -2;
   1265             }
   1266             result = i;
   1267         }
   1268     }
   1269     return result;
   1270 }
   1272 void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
   1273 {
  1274     if( UTLB_ASSOC(addr) ) {
  1275         int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
  1276         if( utlb >= 0 ) {
  1277             struct utlb_entry *ent = &mmu_utlb[utlb];
  1278             uint32_t old_flags = ent->flags;
  1279             ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
  1280             ent->flags |= (val & TLB_VALID);
  1281             ent->flags |= ((val & 0x200)>>7);
  1282             if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
  1283                 if( old_flags & TLB_VALID )
  1284                     mmu_utlb_remove_entry( utlb );
  1285                 if( ent->flags & TLB_VALID )
   1286                     mmu_utlb_insert_entry( utlb );
   1287             }
   1288         }
  1290         int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
  1291         if( itlb >= 0 ) {
  1292             struct itlb_entry *ent = &mmu_itlb[itlb];
   1293             ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
   1294         }
  1296         if( itlb == -2 || utlb == -2 ) {
  1297             RAISE_TLB_MULTIHIT_ERROR(addr);
  1298             EXCEPTION_EXIT();
   1299             return;
   1300         }
  1301     } else {
  1302         struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1303         if( ent->flags & TLB_VALID ) 
  1304             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1305         ent->vpn = (val & 0xFFFFFC00);
  1306         ent->asid = (val & 0xFF);
  1307         ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
  1308         ent->flags |= (val & TLB_VALID);
  1309         ent->flags |= ((val & 0x200)>>7);
  1310         if( ent->flags & TLB_VALID ) 
   1311             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   1312     }
   1313 }
   1315 void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
   1316 {
  1317     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1318     if( UTLB_DATA2(addr) ) {
  1319         ent->pcmcia = val & 0x0000000F;
  1320     } else {
  1321         if( ent->flags & TLB_VALID ) 
  1322             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1323         ent->ppn = (val & 0x1FFFFC00);
  1324         ent->flags = (val & 0x000001FF);
  1325         ent->mask = get_tlb_size_mask(val);
  1326         if( ent->flags & TLB_VALID ) 
   1327             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   1328     }
   1329 }
  1331 struct mem_region_fn p4_region_itlb_addr = {
  1332         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1333         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1334         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1335         unmapped_read_burst, unmapped_write_burst,
  1336         unmapped_prefetch };
  1337 struct mem_region_fn p4_region_itlb_data = {
  1338         mmu_itlb_data_read, mmu_itlb_data_write,
  1339         mmu_itlb_data_read, mmu_itlb_data_write,
  1340         mmu_itlb_data_read, mmu_itlb_data_write,
  1341         unmapped_read_burst, unmapped_write_burst,
  1342         unmapped_prefetch };
  1343 struct mem_region_fn p4_region_utlb_addr = {
  1344         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1345         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1346         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1347         unmapped_read_burst, unmapped_write_burst,
  1348         unmapped_prefetch };
  1349 struct mem_region_fn p4_region_utlb_data = {
  1350         mmu_utlb_data_read, mmu_utlb_data_write,
  1351         mmu_utlb_data_read, mmu_utlb_data_write,
  1352         mmu_utlb_data_read, mmu_utlb_data_write,
  1353         unmapped_read_burst, unmapped_write_burst,
  1354         unmapped_prefetch };
  1356 /********************** Error regions **************************/
   1358 static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
   1359 {
   1360     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1361     EXCEPTION_EXIT();
   1362 }
   1364 static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1365 {
   1366     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1367     EXCEPTION_EXIT();
   1368 }
   1370 static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
   1371 {
   1372     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
   1373     EXCEPTION_EXIT();
   1374 }
   1376 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
   1377 {
   1378     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
   1379     EXCEPTION_EXIT();
   1380 }
   1382 static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1383 {
   1384     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
   1385     EXCEPTION_EXIT();
   1386 }
   1388 static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
   1389 {
   1390     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
   1391     EXCEPTION_EXIT();
   1392 }
   1394 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
   1395 {
   1396     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1397     EXCEPTION_EXIT();
   1398 }
   1400 static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1401 {
   1402     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1403     EXCEPTION_EXIT();
   1404 }
   1406 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
   1407 {
   1408     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
   1409     EXCEPTION_EXIT();
   1410 }
   1412 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
   1413 {
   1414     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
   1415     EXCEPTION_EXIT();
   1416 }
   1418 static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
   1419 {
   1420     MMIO_WRITE(MMU, TEA, addr);
   1421     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
   1422     sh4_raise_reset(EXC_TLB_MULTI_HIT);
   1423     EXCEPTION_EXIT();
   1424 }
   1426 static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1427 {
   1428     MMIO_WRITE(MMU, TEA, addr);
   1429     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
   1430     sh4_raise_reset(EXC_TLB_MULTI_HIT);
   1431     EXCEPTION_EXIT();
   1432 }
   1433 static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
   1434 {
   1435     MMIO_WRITE(MMU, TEA, addr);
   1436     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
   1437     sh4_raise_reset(EXC_TLB_MULTI_HIT);
   1438     EXCEPTION_EXIT();
   1439 }
  1441 /**
  1442  * Note: Per sec 4.6.4 of the SH7750 manual, SQ 
  1443  */
  1444 struct mem_region_fn mem_region_address_error = {
  1445         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1446         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1447         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1448         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1449         unmapped_prefetch };
  1451 struct mem_region_fn mem_region_tlb_miss = {
  1452         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1453         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1454         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1455         (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
  1456         unmapped_prefetch };
  1458 struct mem_region_fn mem_region_tlb_protected = {
  1459         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1460         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1461         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1462         (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
  1463         unmapped_prefetch };
  1465 struct mem_region_fn mem_region_tlb_multihit = {
  1466         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1467         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1468         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1469         (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
  1470         (mem_prefetch_fn_t)tlb_multi_hit_read };
  1473 /* Store-queue regions */
  1474 /* These are a bit of a pain - the first 8 fields are controlled by SQMD, while 
  1475  * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
  1476  * some cases), in contrast to the ordinary fields above.
  1478  * There is probably a simpler way to do this.
  1479  */
  1481 struct mem_region_fn p4_region_storequeue = { 
  1482         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1483         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1484         unmapped_read_long, unmapped_write_long,
  1485         unmapped_read_burst, unmapped_write_burst,
  1486         ccn_storequeue_prefetch }; 
  1488 struct mem_region_fn p4_region_storequeue_miss = { 
  1489         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1490         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1491         unmapped_read_long, unmapped_write_long,
  1492         unmapped_read_burst, unmapped_write_burst,
  1493         (mem_prefetch_fn_t)tlb_miss_read }; 
  1495 struct mem_region_fn p4_region_storequeue_multihit = { 
  1496         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1497         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1498         unmapped_read_long, unmapped_write_long,
  1499         unmapped_read_burst, unmapped_write_burst,
  1500         (mem_prefetch_fn_t)tlb_multi_hit_read }; 
  1502 struct mem_region_fn p4_region_storequeue_protected = {
  1503         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1504         unmapped_read_long, unmapped_write_long,
  1505         unmapped_read_long, unmapped_write_long,
  1506         unmapped_read_burst, unmapped_write_burst,
  1507         (mem_prefetch_fn_t)tlb_protected_read };
  1509 struct mem_region_fn p4_region_storequeue_sqmd = {
  1510         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1511         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1512         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1513         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1514         (mem_prefetch_fn_t)address_error_read };        
  1516 struct mem_region_fn p4_region_storequeue_sqmd_miss = { 
  1517         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1518         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1519         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1520         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1521         (mem_prefetch_fn_t)tlb_miss_read }; 
  1523 struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
  1524         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1525         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1526         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1527         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1528         (mem_prefetch_fn_t)tlb_multi_hit_read };        
  1530 struct mem_region_fn p4_region_storequeue_sqmd_protected = {
  1531         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1532         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1533         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1534         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1535         (mem_prefetch_fn_t)tlb_protected_read };