lxdream.org :: lxdream/src/sh4/mmu.c
filename src/sh4/mmu.c
changeset 1217:677b1d85f1b4
prev 1202:01ae5cbad4c8
next 1295:9067aff5522d
author nkeynes
date Mon Feb 13 20:00:27 2012 +1000 (12 years ago)
permissions -rw-r--r--
last change Fix MMU on non-translated platforms
- reintroduce old VMA translation functions (slightly modified)
- modify shadow processing to work on post-translated memory ops
     1 /**
     2  * $Id$
     3  *
     4  * SH4 MMU implementation based on address space page maps. This module
     5  * is responsible for all address decoding functions. 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    19 #define MODULE sh4_module
    21 #include <stdio.h>
    22 #include <assert.h>
    23 #include "sh4/sh4mmio.h"
    24 #include "sh4/sh4core.h"
    25 #include "sh4/sh4trans.h"
    26 #include "dreamcast.h"
    27 #include "mem.h"
    28 #include "mmu.h"
    30 /* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
    31 #define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
    33 /* Primary address space (used directly by SH4 cores) */
    34 mem_region_fn_t *sh4_address_space;
    35 mem_region_fn_t *sh4_user_address_space;
    37 /* External address space (usually the same as the global ext_address_space) */
    38 static mem_region_fn_t *sh4_ext_address_space;
    40 /* Accessed from the UTLB accessor methods */
    41 uint32_t mmu_urc;
    42 uint32_t mmu_urb;
    43 static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */  
    45 /* Module globals */
    46 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
    47 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
    48 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
    49 static uint32_t mmu_lrui;
    50 static uint32_t mmu_asid; // current asid
    51 static struct utlb_default_regions *mmu_user_storequeue_regions;
    53 /* Structures for 1K page handling */
    54 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
    55 static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
    56 static int mmu_utlb_1k_free_index;
    59 /* Function prototypes */
    60 static void mmu_invalidate_tlb();
    61 static void mmu_utlb_register_all();
    62 static void mmu_utlb_remove_entry(int);
    63 static void mmu_utlb_insert_entry(int);
    64 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    65 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    66 static void mmu_set_tlb_enabled( int tlb_on );
    67 static void mmu_set_tlb_asid( uint32_t asid );
    68 static void mmu_set_storequeue_protected( int protected, int tlb_on );
    69 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
    70 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
    71 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
    72 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
    73 static void mmu_utlb_1k_init();
    74 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
    75 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
    76 static int mmu_read_urc();
    78 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
    79 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
    80 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
    81 static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc );
    82 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
    83 static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc );
    84 static uint32_t get_tlb_size_mask( uint32_t flags );
    85 static uint32_t get_tlb_size_pages( uint32_t flags );
    87 #define DEFAULT_REGIONS 0
    88 #define DEFAULT_STOREQUEUE_REGIONS 1
    89 #define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
    91 static struct utlb_default_regions mmu_default_regions[3] = {
    92         { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
    93         { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
    94         { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
    96 #define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
    98 #ifndef SH4_TRANSLATOR
    99 /* Dummy MMU vtable functions */
   100 void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
   101 {
   102 }
   103 void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
   104 {
   105 }
   106 void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
   107 {
   108 }
   109 #endif
   111 /*********************** Module public functions ****************************/
   113 /**
   114  * Allocate memory for the address space maps, and initialize them according
   115  * to the default (reset) values. (TLB is disabled by default)
   116  */
   118 void MMU_init()
   119 {
   120     sh4_ext_address_space = ext_address_space;
   121     sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   122     sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   123     mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   125     mmu_set_tlb_enabled(0);
   126     mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
   127     mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );                                
   129     /* Setup P4 tlb/cache access regions */
   130     mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   131     mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
   132     mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
   133     mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
   134     mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
   135     mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
   136     mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
   137     mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
   138     mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
   139     mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
   140     mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
   142     /* Setup P4 control region */
   143     mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
   144     mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
   145     mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
   146     mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
   147     mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
   148     mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
   149     mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
   150     mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
   151     mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
   152     mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
   153     mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
   154     mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
   155     mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
   157     register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
   158     mmu_utlb_1k_init();
   160     /* Ensure the code regions are executable. (It might be more portable
   161      * to mmap these at runtime rather than using static decls.)
   162      */
   163     mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
   164     mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
   165 }
   167 void MMU_reset()
   168 {
   169     mmio_region_MMU_write( CCR, 0 );
   170     mmio_region_MMU_write( MMUCR, 0 );
   171 }
   173 void MMU_save_state( FILE *f )
   174 {
   175     mmu_read_urc();   
   176     fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
   177     fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
   178     fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
   179     fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
   180     fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
   181     fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
   182 }
   184 int MMU_load_state( FILE *f )
   185 {
   186     if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
   187         return 1;
   188     }
   189     if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
   190         return 1;
   191     }
   192     if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
   193         return 1;
   194     }
   195     if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
   196         return 1;
   197     }
   198     if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
   199         return 1;
   200     }
   201     if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
   202         return 1;
   203     }
   205     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
   206     mmu_urc_overflow = mmu_urc >= mmu_urb;
   207     mmu_set_tlb_enabled(mmucr&MMUCR_AT);
   208     mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
   209     return 0;
   210 }
   212 /**
   213  * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
   214  * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
   215  */
   216 void MMU_ldtlb()
   217 {
   218     int urc = mmu_read_urc();
   219     if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
   220         mmu_utlb_remove_entry( urc );
   221     mmu_utlb[urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
   222     mmu_utlb[urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
   223     mmu_utlb[urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
   224     mmu_utlb[urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
   225     mmu_utlb[urc].pcmcia = MMIO_READ(MMU, PTEA);
   226     mmu_utlb[urc].mask = get_tlb_size_mask(mmu_utlb[urc].flags);
   227     if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
   228         mmu_utlb_insert_entry( urc );
   229 }
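/* Worked example (illustrative values, not from a real trace): with
 * PTEH == 0x7F000012 and PTEL == 0x0C0001FF, the masks above give the entry
 * vpn = 0x7F000000, asid = 0x12, ppn = 0x0C000000 and flags = 0x1FF, with
 * mask then derived from the size bits in flags via get_tlb_size_mask().
 */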
   232 MMIO_REGION_READ_FN( MMU, reg )
   233 {
   234     reg &= 0xFFF;
   235     switch( reg ) {
   236     case MMUCR:
   237         return MMIO_READ( MMU, MMUCR) | (mmu_read_urc()<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
   238     default:
   239         return MMIO_READ( MMU, reg );
   240     }
   241 }
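/* MMUCR bit fields as assumed by the handlers above and below (reconstructed
 * from the shifts and masks used here; see the SH7750 hardware manual for the
 * authoritative layout):
 *   bit  0      AT    address translation enable
 *   bit  2      TI    TLB invalidate (write only)
 *   bit  8      SV    single virtual memory mode
 *   bit  9      SQMD  store queue access protection
 *   bits 10-15  URC   UTLB replace counter
 *   bits 18-23  URB   UTLB replace boundary
 *   bits 26-31  LRUI  ITLB least-recently-used information
 */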
   243 MMIO_REGION_READ_DEFSUBFNS(MMU)
   245 MMIO_REGION_WRITE_FN( MMU, reg, val )
   246 {
   247     uint32_t tmp;
   248     reg &= 0xFFF;
   249     switch(reg) {
   250     case SH4VER:
   251         return;
   252     case PTEH:
   253         val &= 0xFFFFFCFF;
   254         if( (val & 0xFF) != mmu_asid ) {
   255             mmu_set_tlb_asid( val&0xFF );
   256         }
   257         break;
   258     case PTEL:
   259         val &= 0x1FFFFDFF;
   260         break;
   261     case PTEA:
   262         val &= 0x0000000F;
   263         break;
   264     case TRA:
   265         val &= 0x000003FC;
   266         break;
   267     case EXPEVT:
   268     case INTEVT:
   269         val &= 0x00000FFF;
   270         break;
   271     case MMUCR:
   272         if( val & MMUCR_TI ) {
   273             mmu_invalidate_tlb();
   274         }
   275         mmu_urc = (val >> 10) & 0x3F;
   276         mmu_urb = (val >> 18) & 0x3F;
   277         if( mmu_urb == 0 ) {
   278             mmu_urb = 0x40;
   279         } else if( mmu_urc >= mmu_urb ) {
   280             mmu_urc_overflow = TRUE;
   281         }
   282         mmu_lrui = (val >> 26) & 0x3F;
   283         val &= 0x00000301;
   284         tmp = MMIO_READ( MMU, MMUCR );
   285         if( (val ^ tmp) & (MMUCR_SQMD) ) {
   286             mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
   287         }
   288         if( (val ^ tmp) & (MMUCR_AT) ) {
   289             // AT flag has changed state - flush the xlt cache as all bets
   290             // are off now. We also need to force an immediate exit from the
   291             // current block
   292             mmu_set_tlb_enabled( val & MMUCR_AT );
   293             MMIO_WRITE( MMU, MMUCR, val );
   294             sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
   295             xlat_flush_cache(); // If we're not running, flush the cache anyway
   296         }
   297         break;
   298     case CCR:
   299         CCN_set_cache_control( val );
   300         val &= 0x81A7;
   301         break;
   302     case MMUUNK1:
   303         /* Note that if the high bit is set, this appears to reset the machine.
   304          * Not emulating this behaviour yet until we know why...
   305          */
   306         val &= 0x00010007;
   307         break;
   308     case QACR0:
   309     case QACR1:
   310         val &= 0x0000001C;
   311         break;
   312     case PMCR1:
   313         PMM_write_control(0, val);
   314         val &= 0x0000C13F;
   315         break;
   316     case PMCR2:
   317         PMM_write_control(1, val);
   318         val &= 0x0000C13F;
   319         break;
   320     default:
   321         break;
   322     }
   323     MMIO_WRITE( MMU, reg, val );
   324 }
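/* A minimal usage sketch (hypothetical caller, illustrative only): the same
 * handler services guest stores and internal resets, so enabling translation
 * programmatically is just a register write, e.g.
 *
 *     mmio_region_MMU_write( MMUCR, MMUCR_AT );
 *
 * which, per the MMUCR case above, flushes the translation cache and
 * re-registers the address-space maps via mmu_set_tlb_enabled().
 */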
   326 /********************** 1K Page handling ***********************/
   327 /* Since we use 4K pages as our native page size, 1K pages need a bit of extra
   328  * effort to manage - we justify this on the basis that most programs won't
   329  * actually use 1K pages, so we may as well optimize for the common case.
   330  * 
   331  * Implementation uses an intermediate page entry (the utlb_1k_entry) that
   332  * redirects requests to the 'real' page entry. These are allocated on an
   333  * as-needed basis, and returned to the pool when all subpages are empty.
   334  */ 
   335 static void mmu_utlb_1k_init()
   336 {
   337     int i;
   338     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   339         mmu_utlb_1k_free_list[i] = i;
   340         mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
   341     }
   342     mmu_utlb_1k_free_index = 0;
   343 }
   345 static struct utlb_1k_entry *mmu_utlb_1k_alloc()
   346 {
   347     assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
   348     struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
   349     return entry;
   350 }    
   352 static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
   353 {
   354     unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
   355     assert( entryNo < UTLB_ENTRY_COUNT );
   356     assert( mmu_utlb_1k_free_index > 0 );
   357     mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
   358 }
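/* A minimal sketch of the pool discipline above (illustrative only - the real
 * call sites are mmu_utlb_map_pages()/mmu_utlb_unmap_pages() below): an entry
 * is taken from the free list, its four subpages are seeded, and it goes back
 * to the pool once the subpages collapse to a single mapping again.
 *
 *     struct utlb_1k_entry *ent = mmu_utlb_1k_alloc();
 *     for( int i=0; i<4; i++ ) {
 *         ent->subpages[i] = &mem_region_tlb_miss;      // privileged view
 *         ent->user_subpages[i] = &mem_region_tlb_miss; // user view
 *     }
 *     mmu_utlb_1k_free( ent );
 */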
   361 /********************** Address space maintenance *************************/
   363 mem_region_fn_t *mmu_set_ext_address_space( mem_region_fn_t *ext )
   364 {
   365     mem_region_fn_t *old_ext = sh4_ext_address_space;
   366     sh4_ext_address_space = ext;
   367     mmu_set_tlb_enabled(IS_TLB_ENABLED());
   368     return old_ext;
   369 }
   371 /**
   372  * MMU accessor functions just increment URC - fixup here if necessary
   373  */
   374 static int mmu_read_urc()
   375 {
   376     if( mmu_urc_overflow ) {
   377         if( mmu_urc >= 0x40 ) {
   378             mmu_urc_overflow = FALSE;
   379             mmu_urc -= 0x40;
   380             mmu_urc %= mmu_urb;
   381         }
   382     } else {
   383         mmu_urc %= mmu_urb;
   384     }
   385     return mmu_urc;
   386 }
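/* Worked example of the fixup above (illustrative numbers): an MMUCR write
 * that leaves urb == 0x30 and urc == 0x35 sets mmu_urc_overflow. If the
 * accessors then push urc up to 0x45, the next mmu_read_urc() clears the flag
 * and returns (0x45 - 0x40) % 0x30 == 5; until urc reaches 0x40 the raw value
 * is returned unchanged, which is exactly the transient >= urb state the
 * overflow flag is tracking.
 */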
   388 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   389 {
   390     int count = (end - start) >> 12;
   391     mem_region_fn_t *ptr = &sh4_address_space[start>>12];
   392     while( count-- > 0 ) {
   393         *ptr++ = fn;
   394     }
   395 }
   396 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   397 {
   398     int count = (end - start) >> 12;
   399     mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
   400     while( count-- > 0 ) {
   401         *ptr++ = fn;
   402     }
   403 }
   405 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
   406 {
   407     unsigned int i;
   408     if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
   409         /* TLB on */
   410         sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
   411         sh4_address_space[(page|0xA0000000)>>12] = fn;
   412         /* Scan UTLB and update any direct-referencing entries */
   413     } else {
   414         /* Direct map to U0, P0, P1, P2, P3 */
   415         for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
   416             sh4_address_space[(page|i)>>12] = fn;
   417         }
   418         for( i=0; i < 0x80000000; i+= 0x20000000 ) {
   419             sh4_user_address_space[(page|i)>>12] = fn;
   420         }
   421     }
   422     return TRUE;
   423 }
   425 static void mmu_set_tlb_enabled( int tlb_on )
   426 {
   427     mem_region_fn_t *ptr, *uptr;
   428     int i;
   430     /* Reset the storequeue area */
   432     if( tlb_on ) {
   433         mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   434         mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
   435         mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   437         /* Default SQ prefetch goes to TLB miss (?) */
   438         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
   439         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   440         mmu_utlb_register_all();
   441     } else {
   442         for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   443             memcpy( ptr, sh4_ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   444         }
   445         for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   446             memcpy( ptr, sh4_ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   447         }
   449         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   450         if( IS_STOREQUEUE_PROTECTED() ) {
   451             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
   452         } else {
   453             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   454         }
   455     }
   457 }
   459 /**
   460  * Flip the SQMD switch - this is rather expensive, so it will need to be changed if
   461  * anything expects to do this frequently.
   462  */
   463 static void mmu_set_storequeue_protected( int protected, int tlb_on ) 
   464 {
   465     mem_region_fn_t nontlb_region;
   466     int i;
   468     if( protected ) {
   469         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
   470         nontlb_region = &p4_region_storequeue_sqmd;
   471     } else {
   472         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   473         nontlb_region = &p4_region_storequeue; 
   474     }
   476     if( tlb_on ) {
   477         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   478         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   479             if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
   480                 mmu_utlb_insert_entry(i);
   481             }
   482         }
   483     } else {
   484         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region ); 
   485     }
   487 }
   489 static void mmu_set_tlb_asid( uint32_t asid )
   490 {
   491     if( IS_TLB_ENABLED() ) {
   492         /* Scan for pages that need to be remapped */
   493         int i;
   494         if( IS_SV_ENABLED() ) {
   495             for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   496                 if( mmu_utlb[i].asid == mmu_asid &&
   497                         (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   498                     // Matches old ASID - unmap out
   499                     if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   500                             get_tlb_size_pages(mmu_utlb[i].flags) ) )
   501                         mmu_utlb_remap_pages( FALSE, TRUE, i );
   502                 }
   503             }
   504             for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   505                 if( mmu_utlb[i].asid == asid &&
   506                         (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   507                     // Matches new ASID - map in
   508                     mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
   509                             mmu_utlb[i].vpn&mmu_utlb[i].mask,
   510                             get_tlb_size_pages(mmu_utlb[i].flags) );
   511                 }
   512             }
   513         } else {
   514             // Remap both Priv+user pages
   515             for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   516                 if( mmu_utlb[i].asid == mmu_asid &&
   517                         (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   518                     if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   519                             get_tlb_size_pages(mmu_utlb[i].flags) ) )
   520                         mmu_utlb_remap_pages( TRUE, TRUE, i );
   521                 }
   522             }
   523             for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   524                 if( mmu_utlb[i].asid == asid &&
   525                         (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   526                     mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
   527                             mmu_utlb[i].vpn&mmu_utlb[i].mask,
   528                             get_tlb_size_pages(mmu_utlb[i].flags) );
   529                 }
   530             }
   531         }
   532         sh4_icache.page_vma = -1; // invalidate icache as asid has changed
   533     }
   534     mmu_asid = asid;
   535 }
   537 static uint32_t get_tlb_size_mask( uint32_t flags )
   538 {
   539     switch( flags & TLB_SIZE_MASK ) {
   540     case TLB_SIZE_1K: return MASK_1K;
   541     case TLB_SIZE_4K: return MASK_4K;
   542     case TLB_SIZE_64K: return MASK_64K;
   543     case TLB_SIZE_1M: return MASK_1M;
   544     default: return 0; /* Unreachable */
   545     }
   546 }
   547 static uint32_t get_tlb_size_pages( uint32_t flags )
   548 {
   549     switch( flags & TLB_SIZE_MASK ) {
   550     case TLB_SIZE_1K: return 0;
   551     case TLB_SIZE_4K: return 1;
   552     case TLB_SIZE_64K: return 16;
   553     case TLB_SIZE_1M: return 256;
   554     default: return 0; /* Unreachable */
   555     }
   556 }
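/* The two tables above encode the same fact: an entry of N bytes covers
 * N/4096 of the 4K slots used by the address-space maps, with 1K pages
 * special-cased to 0 so they take the subpage path instead. A minimal sketch
 * of the equivalent arithmetic, assuming the usual MASK_* definitions from
 * mmu.h (e.g. MASK_64K == 0xFFFF0000):
 *
 *     static uint32_t pages_from_mask( uint32_t mask )
 *     {
 *         return (~mask + 1) >> 12;   // 64K -> 16, 1M -> 256, 1K -> 0
 *     }
 */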
   558 /**
   559  * Add a new TLB entry mapping to the address space table. If any of the pages
   560  * are already mapped, they are mapped to the TLB multi-hit page instead.
   561  * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
   562  */ 
   563 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
   564 {
   565     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   566     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   567     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   568     struct utlb_default_regions *userdefs = privdefs;    
   570     gboolean mapping_ok = TRUE;
   571     int i;
   573     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   574         /* Storequeue mapping */
   575         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   576         userdefs = mmu_user_storequeue_regions;
   577     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   578         user_page = NULL; /* No user access to P3 region */
   579     } else if( start_addr >= 0x80000000 ) {
   580         return TRUE; // No mapping - legal but meaningless
   581     }
   583     if( npages == 0 ) {
   584         struct utlb_1k_entry *ent;
   585         int i, idx = (start_addr >> 10) & 0x03;
   586         if( IS_1K_PAGE_ENTRY(*ptr) ) {
   587             ent = (struct utlb_1k_entry *)*ptr;
   588         } else {
   589             ent = mmu_utlb_1k_alloc();
   590             /* New 1K struct - init to previous contents of region */
   591             for( i=0; i<4; i++ ) {
   592                 ent->subpages[i] = *ptr;
   593                 ent->user_subpages[i] = *uptr;
   594             }
   595             *ptr = &ent->fn;
   596             *uptr = &ent->user_fn;
   597         }
   599         if( priv_page != NULL ) {
   600             if( ent->subpages[idx] == privdefs->tlb_miss ) {
   601                 ent->subpages[idx] = priv_page;
   602             } else {
   603                 mapping_ok = FALSE;
   604                 ent->subpages[idx] = privdefs->tlb_multihit;
   605             }
   606         }
   607         if( user_page != NULL ) {
   608             if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
   609                 ent->user_subpages[idx] = user_page;
   610             } else {
   611                 mapping_ok = FALSE;
   612                 ent->user_subpages[idx] = userdefs->tlb_multihit;
   613             }
   614         }
   616     } else {
   617         if( priv_page != NULL ) {
   618             /* Privileged mapping only */
   619             for( i=0; i<npages; i++ ) {
   620                 if( *ptr == privdefs->tlb_miss ) {
   621                     *ptr++ = priv_page;
   622                 } else {
   623                     mapping_ok = FALSE;
   624                     *ptr++ = privdefs->tlb_multihit;
   625                 }
   626             }
   627         }
   628         if( user_page != NULL ) {
   629             /* User mapping only (eg ASID change remap w/ SV=1) */
   630             for( i=0; i<npages; i++ ) {
   631                 if( *uptr == userdefs->tlb_miss ) {
   632                     *uptr++ = user_page;
   633                 } else {
   634                     mapping_ok = FALSE;
   635                     *uptr++ = userdefs->tlb_multihit;
   636                 }
   637             }        
   638         }
   639     }
   641     return mapping_ok;
   642 }
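/* Worked example (illustrative): a valid 64K entry with vpn 0x7F012000 has
 * mask 0xFFFF0000, so start_addr == 0x7F010000 and npages == 16; the loops
 * above then fill sh4_address_space[0x7F010] .. [0x7F01F] (and the user map,
 * when user_page is non-NULL) with the entry's region functions, switching
 * any slot that was already claimed over to the multi-hit region.
 */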
   644 /**
   645  * Remap any pages within the region covered by entryNo, but not including 
   646  * entryNo itself. This is used to reestablish pages that were previously
   647  * covered by a multi-hit exception region when one of the pages is removed.
   648  */
   649 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
   650 {
   651     int mask = mmu_utlb[entryNo].mask;
   652     uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
   653     int i;
   655     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   656         if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
   657             /* Overlapping region */
   658             mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
   659             mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
   660             uint32_t start_addr;
   661             int npages;
   663             if( mmu_utlb[i].mask >= mask ) {
   664                 /* entry is no larger than the area we're replacing - map completely */
   665                 start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
   666                 npages = get_tlb_size_pages( mmu_utlb[i].flags );
   667             } else {
   668                 /* Otherwise map subset - region covered by removed page */
   669                 start_addr = remap_addr;
   670                 npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
   671             }
   673             if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) { 
   674                 mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
   675             } else if( IS_SV_ENABLED() ) {
   676                 mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
   677             }
   679         }
   680     }
   681 }
   683 /**
   684  * Remove a previous TLB mapping (replacing the pages with the TLB miss region).
   685  * @return FALSE if any pages were previously mapped to the TLB multihit page, 
   686  * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
   687  */
   688 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
   689 {
   690     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   691     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   692     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   693     struct utlb_default_regions *userdefs = privdefs;
   695     gboolean unmapping_ok = TRUE;
   696     int i;
   698     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   699         /* Storequeue mapping */
   700         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   701         userdefs = mmu_user_storequeue_regions;
   702     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   703         unmap_user = FALSE;
   704     } else if( start_addr >= 0x80000000 ) {
   705         return TRUE; // No mapping - legal but meaningless
   706     }
   708     if( npages == 0 ) { // 1K page
   709         assert( IS_1K_PAGE_ENTRY( *ptr ) );
   710         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
   711         int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
   712         if( ent->subpages[idx] == privdefs->tlb_multihit ) {
   713             unmapping_ok = FALSE;
   714         }
   715         if( unmap_priv )
   716             ent->subpages[idx] = privdefs->tlb_miss;
   717         if( unmap_user )
   718             ent->user_subpages[idx] = userdefs->tlb_miss;
   720         /* If all 4 subpages have the same content, merge them together and
   721          * release the 1K entry
   722          */
   723         mem_region_fn_t priv_page = ent->subpages[0];
   724         mem_region_fn_t user_page = ent->user_subpages[0];
   725         for( i=1; i<4; i++ ) {
   726             if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
   727                 mergeable = 0;
   728                 break;
   729             }
   730         }
   731         if( mergeable ) {
   732             mmu_utlb_1k_free(ent);
   733             *ptr = priv_page;
   734             *uptr = user_page;
   735         }
   736     } else {
   737         if( unmap_priv ) {
   738             /* Privileged (un)mapping */
   739             for( i=0; i<npages; i++ ) {
   740                 if( *ptr == privdefs->tlb_multihit ) {
   741                     unmapping_ok = FALSE;
   742                 }
   743                 *ptr++ = privdefs->tlb_miss;
   744             }
   745         }
   746         if( unmap_user ) {
   747             /* User (un)mapping */
   748             for( i=0; i<npages; i++ ) {
   749                 if( *uptr == userdefs->tlb_multihit ) {
   750                     unmapping_ok = FALSE;
   751                 }
   752                 *uptr++ = userdefs->tlb_miss;
   753             }            
   754         }
   755     }
   757     return unmapping_ok;
   758 }
   760 static void mmu_utlb_insert_entry( int entry )
   761 {
   762     struct utlb_entry *ent = &mmu_utlb[entry];
   763     mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
   764     mem_region_fn_t upage;
   765     sh4addr_t start_addr = ent->vpn & ent->mask;
   766     int npages = get_tlb_size_pages(ent->flags);
   768     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   769         /* Store queue mappings are a bit different - normal access is fixed to
   770          * the store queue register block, and we only map prefetches through
   771          * the TLB 
   772          */
   773         mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
   775         if( (ent->flags & TLB_USERMODE) == 0 ) {
   776             upage = mmu_user_storequeue_regions->tlb_prot;
   777         } else if( IS_STOREQUEUE_PROTECTED() ) {
   778             upage = &p4_region_storequeue_sqmd;
   779         } else {
   780             upage = page;
   781         }
   783     }  else {
   785         if( (ent->flags & TLB_USERMODE) == 0 ) {
   786             upage = &mem_region_tlb_protected;
   787         } else {        
   788             upage = page;
   789         }
   791         if( (ent->flags & TLB_WRITABLE) == 0 ) {
   792             page->write_long = (mem_write_fn_t)tlb_protected_write;
   793             page->write_word = (mem_write_fn_t)tlb_protected_write;
   794             page->write_byte = (mem_write_fn_t)tlb_protected_write;
   795             page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
   796             page->read_byte_for_write = (mem_read_fn_t)tlb_protected_read_for_write;
   797             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   798         } else if( (ent->flags & TLB_DIRTY) == 0 ) {
   799             page->write_long = (mem_write_fn_t)tlb_initial_write;
   800             page->write_word = (mem_write_fn_t)tlb_initial_write;
   801             page->write_byte = (mem_write_fn_t)tlb_initial_write;
   802             page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
   803             page->read_byte_for_write = (mem_read_fn_t)tlb_initial_read_for_write;
   804             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   805         } else {
   806             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
   807         }
   808     }
   810     mmu_utlb_pages[entry].user_fn = upage;
   812     /* Is page visible? */
   813     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) { 
   814         mmu_utlb_map_pages( page, upage, start_addr, npages );
   815     } else if( IS_SV_ENABLED() ) {
   816         mmu_utlb_map_pages( page, NULL, start_addr, npages );
   817     }
   818 }
   820 static void mmu_utlb_remove_entry( int entry )
   821 {
   822     int i, j;
   823     struct utlb_entry *ent = &mmu_utlb[entry];
   824     sh4addr_t start_addr = ent->vpn&ent->mask;
   825     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   826     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   827     gboolean unmap_user;
   828     int npages = get_tlb_size_pages(ent->flags);
   830     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
   831         unmap_user = TRUE;
   832     } else if( IS_SV_ENABLED() ) {
   833         unmap_user = FALSE;
   834     } else {
   835         return; // Not mapped
   836     }
   838     gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
   840     if( !clean_unmap ) {
   841         mmu_utlb_remap_pages( TRUE, unmap_user, entry );
   842     }
   843 }
   845 static void mmu_utlb_register_all()
   846 {
   847     int i;
   848     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   849         if( mmu_utlb[i].flags & TLB_VALID ) 
   850             mmu_utlb_insert_entry( i );
   851     }
   852 }
   854 static void mmu_invalidate_tlb()
   855 {
   856     int i;
   857     for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
   858         mmu_itlb[i].flags &= (~TLB_VALID);
   859     }
   860     if( IS_TLB_ENABLED() ) {
   861         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   862             if( mmu_utlb[i].flags & TLB_VALID ) {
   863                 mmu_utlb_remove_entry( i );
   864             }
   865         }
   866     }
   867     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   868         mmu_utlb[i].flags &= (~TLB_VALID);
   869     }
   870 }
   872 /******************************************************************************/
   873 /*                        MMU TLB address translation                         */
   874 /******************************************************************************/
   876 /**
   877  * Translate a 32-bit address into a UTLB entry number. Does not check for
   878  * page protection etc.
   879  * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
   880  */
   881 int mmu_utlb_entry_for_vpn( uint32_t vpn )
   882 {
   883     mmu_urc++;
   884     mem_region_fn_t fn = sh4_address_space[vpn>>12];
   885     if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   886         return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   887     } else if( fn >= &mmu_utlb_1k_pages[0].fn && fn < &mmu_utlb_1k_pages[UTLB_ENTRY_COUNT].fn ) {
   888         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)fn;
   889         fn = ent->subpages[(vpn>>10)&0x03];
   890         if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   891             return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   892         }            
   893     } 
   894     if( fn == &mem_region_tlb_multihit ) {
   895         return -2;
   896     } else {
   897         return -1;
   898     }
   899 }
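/* A minimal usage sketch following the return contract documented above
 * (hypothetical caller; the macros are the ones used elsewhere in this file):
 *
 *     int entryNo = mmu_utlb_entry_for_vpn( vpn );
 *     if( entryNo == -2 ) {
 *         RAISE_TLB_MULTIHIT_ERROR( vpn );
 *     } else if( entryNo == -1 ) {
 *         RAISE_TLB_ERROR( EXC_TLB_MISS_READ, vpn );
 *     } else {
 *         struct utlb_entry *ent = &mmu_utlb[entryNo];  // matched entry
 *     }
 */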
   902 /**
   903  * Perform the actual utlb lookup w/ asid matching.
   904  * Possible outcomes are:
   905  *   0..63 Single match - good, return entry found
   906  *   -1 No match - raise a tlb data miss exception
   907  *   -2 Multiple matches - raise a multi-hit exception (reset)
   908  * @param vpn virtual address to resolve
   909  * @return the resultant UTLB entry, or an error.
   910  */
   911 static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
   912 {
   913     int result = -1;
   914     unsigned int i;
   916     mmu_urc++;
   917     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   918         mmu_urc = 0;
   919     }
   921     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   922         if( (mmu_utlb[i].flags & TLB_VALID) &&
   923                 ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
   924                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   925             if( result != -1 ) {
   926                 return -2;
   927             }
   928             result = i;
   929         }
   930     }
   931     return result;
   932 }
   934 /**
   935  * Perform the actual utlb lookup matching on vpn only
   936  * Possible outcomes are:
   937  *   0..63 Single match - good, return entry found
   938  *   -1 No match - raise a tlb data miss exception
   939  *   -2 Multiple matches - raise a multi-hit exception (reset)
   940  * @param vpn virtual address to resolve
   941  * @return the resultant UTLB entry, or an error.
   942  */
   943 static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
   944 {
   945     int result = -1;
   946     unsigned int i;
   948     mmu_urc++;
   949     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   950         mmu_urc = 0;
   951     }
   953     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   954         if( (mmu_utlb[i].flags & TLB_VALID) &&
   955                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   956             if( result != -1 ) {
   957                 return -2;
   958             }
   959             result = i;
   960         }
   961     }
   963     return result;
   964 }
   966 /**
   967  * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
   968  * @return the number (0-3) of the replaced entry.
   969  */
   970 static int inline mmu_itlb_update_from_utlb( int entryNo )
   971 {
   972     int replace;
   973     /* Determine entry to replace based on lrui */
   974     if( (mmu_lrui & 0x38) == 0x38 ) {
   975         replace = 0;
   976         mmu_lrui = mmu_lrui & 0x07;
   977     } else if( (mmu_lrui & 0x26) == 0x06 ) {
   978         replace = 1;
   979         mmu_lrui = (mmu_lrui & 0x19) | 0x20;
   980     } else if( (mmu_lrui & 0x15) == 0x01 ) {
   981         replace = 2;
   982         mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
   983     } else { // Note - gets invalid entries too
   984         replace = 3;
   985         mmu_lrui = (mmu_lrui | 0x0B);
   986     }
   988     mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
   989     mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
   990     mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
   991     mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
   992     mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
   993     return replace;
   994 }
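/* The bit tests above implement the SH7750 LRUI scheme: each of the six LRUI
 * bits records the relative order of use of one pair of ITLB entries, so the
 * three mask/value tests select whichever entry is currently least recently
 * used (the final case also absorbs invalid/unused patterns, as noted). The
 * same update patterns are applied on a lookup hit in the functions below.
 */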
   996 /**
   997  * Perform the actual itlb lookup w/ asid protection
   998  * Possible outcomes are:
   999  *   0..63 Single match - good, return entry found
  1000  *   -1 No match - raise a tlb data miss exception
  1001  *   -2 Multiple matches - raise a multi-hit exception (reset)
  1002  * @param vpn virtual address to resolve
  1003  * @return the resultant ITLB entry, or an error.
  1004  */
  1005 static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
  1006 {
  1007     int result = -1;
  1008     unsigned int i;
  1010     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1011         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1012                 ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
  1013                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1014             if( result != -1 ) {
  1015                 return -2;
  1016             }
  1017             result = i;
  1018         }
  1019     }
  1021     if( result == -1 ) {
  1022         int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
  1023         if( utlbEntry < 0 ) {
  1024             return utlbEntry;
  1025         } else {
  1026             return mmu_itlb_update_from_utlb( utlbEntry );
  1027         }
  1028     }
  1030     switch( result ) {
  1031     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
  1032     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
  1033     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
  1034     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
  1035     }
  1037     return result;
  1038 }
  1040 /**
  1041  * Perform the actual itlb lookup on vpn only
  1042  * Possible outcomes are:
  1043  *   0..63 Single match - good, return entry found
  1044  *   -1 No match - raise a tlb data miss exception
  1045  *   -2 Multiple matches - raise a multi-hit exception (reset)
  1046  * @param vpn virtual address to resolve
  1047  * @return the resultant ITLB entry, or an error.
  1048  */
  1049 static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
  1050 {
  1051     int result = -1;
  1052     unsigned int i;
  1054     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1055         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1056                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1057             if( result != -1 ) {
  1058                 return -2;
  1059             }
  1060             result = i;
  1061         }
  1062     }
  1064     if( result == -1 ) {
  1065         int utlbEntry = mmu_utlb_lookup_vpn( vpn );
  1066         if( utlbEntry < 0 ) {
  1067             return utlbEntry;
  1068         } else {
  1069             return mmu_itlb_update_from_utlb( utlbEntry );
  1070         }
  1071     }
  1073     switch( result ) {
  1074     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
  1075     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
  1076     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
  1077     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
  1078     }
  1080     return result;
  1081 }
  1083 /**
  1084  * Update the icache for an untranslated address
  1085  */
  1086 static inline void mmu_update_icache_phys( sh4addr_t addr )
  1087 {
  1088     if( (addr & 0x1C000000) == 0x0C000000 ) {
  1089         /* Main ram */
  1090         sh4_icache.page_vma = addr & 0xFF000000;
  1091         sh4_icache.page_ppa = 0x0C000000;
  1092         sh4_icache.mask = 0xFF000000;
  1093         sh4_icache.page = dc_main_ram;
  1094     } else if( (addr & 0x1FE00000) == 0 ) {
  1095         /* BIOS ROM */
  1096         sh4_icache.page_vma = addr & 0xFFE00000;
  1097         sh4_icache.page_ppa = 0;
  1098         sh4_icache.mask = 0xFFE00000;
  1099         sh4_icache.page = dc_boot_rom;
  1100     } else {
  1101         /* not supported */
  1102         sh4_icache.page_vma = -1;
  1103     }
  1104 }
  1106 /**
  1107  * Update the sh4_icache structure to describe the page(s) containing the
  1108  * given vma. If the address does not reference a RAM/ROM region, the icache
  1109  * will be invalidated instead.
  1110  * If AT is on, this method will raise TLB exceptions normally
  1111  * (hence this method should only be used immediately prior to execution of
  1112  * code), and otherwise will set the icache according to the matching TLB entry.
  1113  * If AT is off, this method will set the entire referenced RAM/ROM region in
  1114  * the icache.
  1115  * @return TRUE if the update completed (successfully or otherwise), FALSE
  1116  * if an exception was raised.
  1117  */
  1118 gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
  1119 {
  1120     int entryNo;
  1121     if( IS_SH4_PRIVMODE()  ) {
  1122         if( addr & 0x80000000 ) {
  1123             if( addr < 0xC0000000 ) {
  1124                 /* P1, P2 and P4 regions are pass-through (no translation) */
  1125                 mmu_update_icache_phys(addr);
  1126                 return TRUE;
  1127             } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
  1128                 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1129                 return FALSE;
  1130             }
  1131         }
  1133         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1134         if( (mmucr & MMUCR_AT) == 0 ) {
  1135             mmu_update_icache_phys(addr);
  1136             return TRUE;
  1137         }
  1139         if( (mmucr & MMUCR_SV) == 0 )
  1140             entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1141         else
  1142             entryNo = mmu_itlb_lookup_vpn( addr );
  1143     } else {
  1144         if( addr & 0x80000000 ) {
  1145             RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1146             return FALSE;
  1147         }
  1149         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1150         if( (mmucr & MMUCR_AT) == 0 ) {
  1151             mmu_update_icache_phys(addr);
  1152             return TRUE;
  1153         }
  1155         entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1157         if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
  1158             RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1159             return FALSE;
  1160         }
  1161     }
  1163     switch(entryNo) {
  1164     case -1:
  1165         RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1166         return FALSE;
  1167     case -2:
  1168         RAISE_TLB_MULTIHIT_ERROR(addr);
  1169         return FALSE;
  1170     default:
  1171         sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
  1172         sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
  1173         if( sh4_icache.page == NULL ) {
  1174             sh4_icache.page_vma = -1;
  1175         } else {
  1176             sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
  1177             sh4_icache.mask = mmu_itlb[entryNo].mask;
  1178         }
  1179         return TRUE;
  1180     }
  1181 }
  1183 /**
  1184  * Translate address for disassembly purposes (ie performs an instruction
  1185  * lookup) - does not raise exceptions or modify any state, and ignores
  1186  * protection bits. Returns the translated address, or MMU_VMA_ERROR
  1187  * on translation failure.
  1188  */
  1189 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
  1190 {
  1191     if( vma & 0x80000000 ) {
  1192         if( vma < 0xC0000000 ) {
  1193             /* P1, P2 and P4 regions are pass-through (no translation) */
  1194             return VMA_TO_EXT_ADDR(vma);
  1195         } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
  1196             /* Not translatable */
  1197             return MMU_VMA_ERROR;
  1198         }
  1199     }
  1201     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1202     if( (mmucr & MMUCR_AT) == 0 ) {
  1203         return VMA_TO_EXT_ADDR(vma);
  1204     }
  1206     int entryNo = mmu_itlb_lookup_vpn( vma );
  1207     if( entryNo == -2 ) {
  1208         entryNo = mmu_itlb_lookup_vpn_asid( vma );
  1209     }
  1210     if( entryNo < 0 ) {
  1211         return MMU_VMA_ERROR;
  1212     } else {
  1213         return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
  1214         (vma & (~mmu_itlb[entryNo].mask));
  1215     }
  1216 }
  1218 /**
  1219  * Translate a virtual to physical address for reading, raising exceptions as
  1220  * observed.
  1221  * @param addr Pointer to the virtual memory address. On successful return,
  1222  * will be updated to contain the physical address.
  1223  */
  1224 mem_region_fn_t FASTCALL mmu_get_region_for_vma_read( sh4vma_t *paddr )
  1225 {
  1226     sh4vma_t addr = *paddr;
  1227     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1228     if( addr & 0x80000000 ) {
  1229         if( IS_SH4_PRIVMODE() ) {
  1230             if( addr >= 0xE0000000 ) {
  1231                 return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
  1232             } else if( addr < 0xC0000000 ) {
  1233                 /* P1, P2 regions are pass-through (no translation) */
  1234                 return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
  1235             }
  1236         } else {
  1237             if( addr >= 0xE0000000 && addr < 0xE4000000 &&
  1238                     ((mmucr&MMUCR_SQMD) == 0) ) {
  1239                 /* Conditional user-mode access to the store-queue (no translation) */
  1240                 return &p4_region_storequeue;
  1241             }
  1242             sh4_raise_exception(EXC_DATA_ADDR_READ);
  1243             return NULL;
  1244         }
  1245     }
  1247     if( (mmucr & MMUCR_AT) == 0 ) {
  1248         return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
  1249     }
  1251     /* If we get this far, translation is required */
  1252     int entryNo;
  1253     if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
  1254         entryNo = mmu_utlb_lookup_vpn_asid( addr );
  1255     } else {
  1256         entryNo = mmu_utlb_lookup_vpn( addr );
  1257     }
  1259     switch(entryNo) {
  1260     case -1:
  1261         RAISE_TLB_ERROR(EXC_TLB_MISS_READ,addr);
  1262         return NULL;
  1263     case -2:
  1264         RAISE_TLB_MULTIHIT_ERROR(addr);
  1265         return NULL;
  1266     default:
  1267         if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
  1268                 !IS_SH4_PRIVMODE() ) {
  1269             /* protection violation */
  1270             RAISE_MEM_ERROR(EXC_TLB_PROT_READ,addr);
  1271             return NULL;
  1272         }
  1274         /* finally generate the target address */
  1275         sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
  1276                 (addr & (~mmu_utlb[entryNo].mask));
  1277         if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
  1278             addr = pma | 0xE0000000;
  1279             *paddr = addr;
  1280             return sh4_address_space[addr>>12];
  1281         } else {
  1282             *paddr = pma;
  1283             return sh4_ext_address_space[pma>>12];
  1284         }
  1285     }
  1286 }
  1288 /**
  1289  * Translate a virtual to physical address for prefetch, which mostly
  1290  * does not raise exceptions.
  1291  * @param addr Pointer to the virtual memory address. On successful return,
  1292  * will be updated to contain the physical address.
  1293  */
  1294 mem_region_fn_t FASTCALL mmu_get_region_for_vma_prefetch( sh4vma_t *paddr )
  1295 {
  1296     sh4vma_t addr = *paddr;
  1297     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1298     if( addr & 0x80000000 ) {
  1299         if( IS_SH4_PRIVMODE() ) {
  1300             if( addr >= 0xE0000000 ) {
  1301                 return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
  1302             } else if( addr < 0xC0000000 ) {
  1303                 /* P1, P2 regions are pass-through (no translation) */
  1304                 return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
  1305             }
  1306         } else {
  1307             if( addr >= 0xE0000000 && addr < 0xE4000000 &&
  1308                     ((mmucr&MMUCR_SQMD) == 0) ) {
  1309                 /* Conditional user-mode access to the store-queue (no translation) */
  1310                 return &p4_region_storequeue;
  1311             }
  1312             sh4_raise_exception(EXC_DATA_ADDR_READ);
  1313             return NULL;
  1314         }
  1315     }
  1317     if( (mmucr & MMUCR_AT) == 0 ) {
  1318         return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
  1319     }
  1321     /* If we get this far, translation is required */
  1322     int entryNo;
  1323     if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
  1324         entryNo = mmu_utlb_lookup_vpn_asid( addr );
  1325     } else {
  1326         entryNo = mmu_utlb_lookup_vpn( addr );
  1327     }
  1329     switch(entryNo) {
  1330     case -1:
  1331         return &mem_region_unmapped;
  1332     case -2:
  1333         RAISE_TLB_MULTIHIT_ERROR(addr);
  1334         return NULL;
  1335     default:
  1336         if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
  1337                 !IS_SH4_PRIVMODE() ) {
  1338             /* protection violation */
  1339             return &mem_region_unmapped;
  1340         }
  1342         /* finally generate the target address */
  1343         sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
  1344                 (addr & (~mmu_utlb[entryNo].mask));
  1345         if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
  1346             addr = pma | 0xE0000000;
  1347             *paddr = addr;
  1348             return sh4_address_space[addr>>12];
  1349         } else {
  1350             *paddr = pma;
  1351             return sh4_ext_address_space[pma>>12];
  1352         }
  1353     }
  1354 }
  1356 /**
  1357  * Translate a virtual to physical address for writing, raising exceptions as
  1358  * observed.
  1359  */
  1360 mem_region_fn_t FASTCALL mmu_get_region_for_vma_write( sh4vma_t *paddr )
  1361 {
  1362     sh4vma_t addr = *paddr;
  1363     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1364     if( addr & 0x80000000 ) {
  1365         if( IS_SH4_PRIVMODE() ) {
  1366             if( addr >= 0xE0000000 ) {
  1367                 return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
  1368             } else if( addr < 0xC0000000 ) {
  1369                 /* P1, P2 regions are pass-through (no translation) */
  1370                 return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
  1371             }
  1372         } else {
  1373             if( addr >= 0xE0000000 && addr < 0xE4000000 &&
  1374                     ((mmucr&MMUCR_SQMD) == 0) ) {
  1375                 /* Conditional user-mode access to the store-queue (no translation) */
  1376                 return &p4_region_storequeue;
  1377             }
  1378             sh4_raise_exception(EXC_DATA_ADDR_WRITE);
  1379             return NULL;
  1380         }
  1381     }
  1383     if( (mmucr & MMUCR_AT) == 0 ) {
  1384         return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
  1385     }
  1387     /* If we get this far, translation is required */
  1388     int entryNo;
  1389     if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
  1390         entryNo = mmu_utlb_lookup_vpn_asid( addr );
  1391     } else {
  1392         entryNo = mmu_utlb_lookup_vpn( addr );
  1393     }
  1395     switch(entryNo) {
  1396     case -1:
  1397         RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE,addr);
  1398         return NULL;
  1399     case -2:
  1400         RAISE_TLB_MULTIHIT_ERROR(addr);
  1401         return NULL;
  1402     default:
  1403         if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
  1404                 : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
  1405             /* protection violation */
  1406             RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE,addr);
  1407             return NULL;
  1408         }
  1410         if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
  1411             RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
  1412             return NULL;
  1413         }
  1415         /* finally generate the target address */
  1416         sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
  1417                 (addr & (~mmu_utlb[entryNo].mask));
  1418         if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
  1419             addr = pma | 0xE0000000;
  1420             *paddr = addr;
  1421             return sh4_address_space[addr>>12];
  1422         } else {
  1423             *paddr = pma;
  1424             return sh4_ext_address_space[pma>>12];
  1425         }
  1426     }
  1427 }
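The write path adds two checks on top of the read/prefetch translation: the protection test depends on processor mode (TLB_WRITABLE alone in privileged mode, the full TLB_USERWRITABLE combination in user mode), and a clean page triggers an initial-page-write exception so the OS can set the dirty bit. A condensed sketch of that decision follows; the numeric flag values are illustrative assumptions, not the definitions from mmu.h.

/* Illustrative only: these flag encodings are assumptions, not the module's real values. */
#define SKETCH_TLB_WRITABLE      0x020
#define SKETCH_TLB_USERMODE      0x040
#define SKETCH_TLB_USERWRITABLE  (SKETCH_TLB_WRITABLE|SKETCH_TLB_USERMODE)
#define SKETCH_TLB_DIRTY         0x004

enum write_result { WRITE_OK, WRITE_PROT_FAULT, WRITE_INITIAL_PAGE };

static enum write_result check_write( unsigned flags, int privmode )
{
    /* Same shape as the ternary in the default: case above */
    if( privmode ? ((flags & SKETCH_TLB_WRITABLE) == 0)
                 : ((flags & SKETCH_TLB_USERWRITABLE) != SKETCH_TLB_USERWRITABLE) )
        return WRITE_PROT_FAULT;          /* -> EXC_TLB_PROT_WRITE */
    if( (flags & SKETCH_TLB_DIRTY) == 0 )
        return WRITE_INITIAL_PAGE;        /* -> EXC_INIT_PAGE_WRITE */
    return WRITE_OK;
}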
  1431 /********************** TLB Direct-Access Regions ***************************/
  1432 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
  1434 int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
  1435 {
  1436     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1437     return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
  1438 }
  1440 void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
  1441 {
  1442     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1443     ent->vpn = val & 0xFFFFFC00;
  1444     ent->asid = val & 0x000000FF;
  1445     ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
  1446 }
  1448 int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
  1449 {
  1450     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1451     return (ent->ppn & 0x1FFFFC00) | ent->flags;
  1452 }
  1454 void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
  1455 {
  1456     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1457     ent->ppn = val & 0x1FFFFC00;
  1458     ent->flags = val & 0x00001DA;
  1459     ent->mask = get_tlb_size_mask(val);
  1460     if( ent->ppn >= 0x1C000000 )
  1461         ent->ppn |= 0xE0000000;
  1462 }
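The four accessors above expose the ITLB through memory-mapped address and data arrays: the address array carries VPN, ASID and the valid bit, while the data array carries the PPN plus protection/size flags. A small sketch decoding an address-array write with the same masks as mmu_itlb_addr_write; the example value is hypothetical, and TLB_VALID is assumed to be the V bit at 0x100.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t val  = 0x8C0043A5;           /* hypothetical address-array write value */
    uint32_t vpn  = val & 0xFFFFFC00;     /* VPN: bits 31..10 */
    uint32_t asid = val & 0x000000FF;     /* ASID: bits 7..0 */
    uint32_t v    = val & 0x00000100;     /* V bit (TLB_VALID assumed to be 0x100) */
    printf("vpn=%08X asid=%02X valid=%u\n", (unsigned)vpn, (unsigned)asid, v != 0);
    return 0;
}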
  1464 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
  1465 #define UTLB_ASSOC(addr) (addr&0x80)
  1466 #define UTLB_DATA2(addr) (addr&0x00800000)
  1468 int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
  1469 {
  1470     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1471     return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
  1472     ((ent->flags & TLB_DIRTY)<<7);
  1473 }
  1474 int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
  1475 {
  1476     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1477     if( UTLB_DATA2(addr) ) {
  1478         return ent->pcmcia;
  1479     } else {
  1480         return (ent->ppn&0x1FFFFC00) | ent->flags;
  1481     }
  1482 }
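Compared with the ITLB accessor, mmu_utlb_addr_read also exposes the dirty bit: shifting the internal TLB_DIRTY flag left by 7 places it at bit 9 of the register view, the exact inverse of the (val & 0x200)>>7 extraction in mmu_utlb_addr_write further down. A sketch of that round trip, assuming TLB_DIRTY is 0x4 (the value implied by the two shifts):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint32_t TLB_DIRTY_FLAG = 0x4;       /* assumed internal flag value */
    uint32_t reg_d = TLB_DIRTY_FLAG << 7;      /* register view: bit 9 (0x200) */
    assert( reg_d == 0x200 );
    assert( ((reg_d & 0x200) >> 7) == TLB_DIRTY_FLAG );  /* inverse used on the write path */
    return 0;
}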
  1484 /**
  1485  * Find a UTLB entry for the associative TLB write - same as the normal
  1486  * lookup but ignores the valid bit.
  1487  */
  1488 static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
  1489 {
  1490     int result = -1;
  1491     unsigned int i;
  1492     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
  1493         if( (mmu_utlb[i].flags & TLB_VALID) &&
  1494                 ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
  1495                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
  1496             if( result != -1 ) {
  1497                 fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
  1498                 return -2;
  1499             }
  1500             result = i;
  1501         }
  1502     }
  1503     return result;
  1504 }
  1506 /**
  1507  * Find an ITLB entry for the associative TLB write - same as the normal
  1508  * lookup but ignores the valid bit.
  1509  */
  1510 static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
  1511 {
  1512     int result = -1;
  1513     unsigned int i;
  1514     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1515         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1516                 ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
  1517                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1518             if( result != -1 ) {
  1519                 return -2;
  1520             }
  1521             result = i;
  1522         }
  1523     }
  1524     return result;
  1525 }
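Both associative lookups accept an entry only when it is valid, its ASID matches (or TLB_SHARE is set), and the probe VPN agrees with the entry VPN on every bit covered by the entry's size mask; a second match returns -2 so the caller can raise a multi-hit exception. A sketch of the mask-based VPN comparison with hypothetical values (the 64KB mask is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t entry_vpn  = 0x7C120000;     /* hypothetical UTLB entry VPN */
    uint32_t entry_mask = 0xFFFF0000;     /* 64KB page size mask (assumed) */
    uint32_t probe_vpn  = 0x7C12A000;     /* VPN taken from the associative write */

    /* Same test as ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 above */
    int hit = ((entry_vpn ^ probe_vpn) & entry_mask) == 0;
    printf("hit=%d\n", hit);   /* 1: the differing low bits fall outside the mask */
    return 0;
}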
  1527 void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
  1528 {
  1529     if( UTLB_ASSOC(addr) ) {
  1530         int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
  1531         if( utlb >= 0 ) {
  1532             struct utlb_entry *ent = &mmu_utlb[utlb];
  1533             uint32_t old_flags = ent->flags;
  1534             ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
  1535             ent->flags |= (val & TLB_VALID);
  1536             ent->flags |= ((val & 0x200)>>7);
  1537             if( IS_TLB_ENABLED() && ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
  1538                 if( old_flags & TLB_VALID )
  1539                     mmu_utlb_remove_entry( utlb );
  1540                 if( ent->flags & TLB_VALID )
  1541                     mmu_utlb_insert_entry( utlb );
  1542             }
  1543         }
  1545         int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
  1546         if( itlb >= 0 ) {
  1547             struct itlb_entry *ent = &mmu_itlb[itlb];
  1548             ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
  1549         }
  1551         if( itlb == -2 || utlb == -2 ) {
  1552             RAISE_TLB_MULTIHIT_ERROR(addr); /* FIXME: should this only be raised if TLB is enabled? */
  1553             SH4_EXCEPTION_EXIT();
  1554             return;
  1555         }
  1556     } else {
  1557         struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1558         if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
  1559             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1560         ent->vpn = (val & 0xFFFFFC00);
  1561         ent->asid = (val & 0xFF);
  1562         ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
  1563         ent->flags |= (val & TLB_VALID);
  1564         ent->flags |= ((val & 0x200)>>7);
  1565         if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
  1566             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
  1567     }
  1568 }
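As the UTLB_ENTRY and UTLB_ASSOC macros above show, bit 7 of the incoming P4 address chooses between an associative write (match by VPN/ASID) and a direct write to the entry indexed by bits 13..8. The sketch below shows the two address forms; the 0xF6000000 base is the SH7750's documented UTLB address-array region and is stated as background, not taken from this file.

#include <stdint.h>
#include <stdio.h>

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)

int main(void)
{
    uint32_t indexed = 0xF6000000 | (17u << 8);   /* direct write to entry 17 */
    uint32_t assoc   = 0xF6000080;                /* associative write (bit 7 set) */
    printf("indexed: entry=%u assoc=%u\n",
           (unsigned)UTLB_ENTRY(indexed), (unsigned)(UTLB_ASSOC(indexed) != 0));
    printf("assoc:   entry=%u assoc=%u\n",
           (unsigned)UTLB_ENTRY(assoc), (unsigned)(UTLB_ASSOC(assoc) != 0));
    return 0;
}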
  1570 void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
  1571 {
  1572     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1573     if( UTLB_DATA2(addr) ) {
  1574         ent->pcmcia = val & 0x0000000F;
  1575     } else {
  1576         if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
  1577             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1578         ent->ppn = (val & 0x1FFFFC00);
  1579         ent->flags = (val & 0x000001FF);
  1580         ent->mask = get_tlb_size_mask(val);
  1581         if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
  1582             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
  1583     }
  1584 }
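mmu_utlb_data_write splits the register value into the physical page number (bits 28..10), the nine low flag bits, and a page-size mask computed by get_tlb_size_mask() from the SZ bits. The sketch below shows that field split; the SZ1/SZ0 positions and the size table follow the standard SH7750 encoding and are not copied from this module's get_tlb_size_mask().

#include <stdint.h>
#include <stdio.h>

/* Assumed SZ1/SZ0 -> page mask mapping (SH7750 page sizes 1K/4K/64K/1M). */
static uint32_t sketch_size_mask( uint32_t val )
{
    uint32_t sz = ((val >> 6) & 0x02) | ((val >> 4) & 0x01);  /* SZ1 at bit 7, SZ0 at bit 4 */
    switch( sz ) {
    case 0:  return 0xFFFFFC00;   /* 1KB  */
    case 1:  return 0xFFFFF000;   /* 4KB  */
    case 2:  return 0xFFFF0000;   /* 64KB */
    default: return 0xFFF00000;   /* 1MB  */
    }
}

int main(void)
{
    uint32_t val   = 0x0C123196;            /* hypothetical data-array write value */
    uint32_t ppn   = val & 0x1FFFFC00;
    uint32_t flags = val & 0x000001FF;
    printf("ppn=%08X flags=%03X mask=%08X\n",
           (unsigned)ppn, (unsigned)flags, (unsigned)sketch_size_mask(val));
    return 0;
}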
  1586 struct mem_region_fn p4_region_itlb_addr = {
  1587         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1588         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1589         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1590         unmapped_read_burst, unmapped_write_burst,
  1591         unmapped_prefetch, mmu_itlb_addr_read };
  1592 struct mem_region_fn p4_region_itlb_data = {
  1593         mmu_itlb_data_read, mmu_itlb_data_write,
  1594         mmu_itlb_data_read, mmu_itlb_data_write,
  1595         mmu_itlb_data_read, mmu_itlb_data_write,
  1596         unmapped_read_burst, unmapped_write_burst,
  1597         unmapped_prefetch, mmu_itlb_data_read };
  1598 struct mem_region_fn p4_region_utlb_addr = {
  1599         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1600         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1601         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1602         unmapped_read_burst, unmapped_write_burst,
  1603         unmapped_prefetch, mmu_utlb_addr_read };
  1604 struct mem_region_fn p4_region_utlb_data = {
  1605         mmu_utlb_data_read, mmu_utlb_data_write,
  1606         mmu_utlb_data_read, mmu_utlb_data_write,
  1607         mmu_utlb_data_read, mmu_utlb_data_write,
  1608         unmapped_read_burst, unmapped_write_burst,
  1609         unmapped_prefetch, mmu_utlb_data_read };
  1611 /********************** Error regions **************************/
  1613 static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
  1614 {
  1615     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1616     SH4_EXCEPTION_EXIT();
  1617 }
  1619 static void FASTCALL address_error_read_for_write( sh4addr_t addr, void *exc )
  1620 {
  1621     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
  1622     SH4_EXCEPTION_EXIT();
  1623 }
  1625 static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1626 {
  1627     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1628     SH4_EXCEPTION_EXIT();
  1629 }
  1631 static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
  1632 {
  1633     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
  1634     SH4_EXCEPTION_EXIT();
  1635 }
  1637 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
  1638 {
  1639     mmu_urc++;
  1640     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1641     SH4_EXCEPTION_EXIT();
  1642 }
  1644 static void FASTCALL tlb_miss_read_for_write( sh4addr_t addr, void *exc )
  1645 {
  1646     mmu_urc++;
  1647     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
  1648     SH4_EXCEPTION_EXIT();
  1649 }
  1651 static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1652 {
  1653     mmu_urc++;
  1654     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1655     SH4_EXCEPTION_EXIT();
  1656 }
  1658 static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
  1659 {
  1660     mmu_urc++;
  1661     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
  1662     SH4_EXCEPTION_EXIT();
  1663 }
  1665 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
  1666 {
  1667     mmu_urc++;
  1668     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1669     SH4_EXCEPTION_EXIT();
  1670     return 0;
  1671 }
  1673 static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc )
  1674 {
  1675     mmu_urc++;
  1676     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
  1677     SH4_EXCEPTION_EXIT();
  1678     return 0;
  1679 }
  1681 static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1682 {
  1683     mmu_urc++;
  1684     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1685     SH4_EXCEPTION_EXIT();
  1686     return 0;
  1687 }
  1689 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
  1690 {
  1691     mmu_urc++;
  1692     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
  1693     SH4_EXCEPTION_EXIT();
  1694 }
  1696 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
  1697 {
  1698     mmu_urc++;
  1699     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
  1700     SH4_EXCEPTION_EXIT();
  1701 }
  1703 static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc )
  1704 {
  1705     mmu_urc++;
  1706     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
  1707     SH4_EXCEPTION_EXIT();
  1708     return 0;
  1709 }
  1711 static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
  1712 {
  1713     sh4_raise_tlb_multihit(addr);
  1714     SH4_EXCEPTION_EXIT();
  1715     return 0;
  1716 }
  1718 static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1719 {
  1720     sh4_raise_tlb_multihit(addr);
  1721     SH4_EXCEPTION_EXIT();
  1722     return 0;
  1723 }
  1724 static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
  1725 {
  1726     sh4_raise_tlb_multihit(addr);
  1727     SH4_EXCEPTION_EXIT();
  1728 }
  1730 /**
  1731  * Note: Per sec 4.6.4 of the SH7750 manual, SQ 
  1732  */
  1733 struct mem_region_fn mem_region_address_error = {
  1734         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1735         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1736         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1737         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1738         unmapped_prefetch, (mem_read_fn_t)address_error_read_for_write };
  1740 struct mem_region_fn mem_region_tlb_miss = {
  1741         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1742         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1743         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1744         (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
  1745         unmapped_prefetch, (mem_read_fn_t)tlb_miss_read_for_write };
  1747 struct mem_region_fn mem_region_tlb_protected = {
  1748         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1749         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1750         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1751         (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
  1752         unmapped_prefetch, (mem_read_fn_t)tlb_protected_read_for_write };
  1754 struct mem_region_fn mem_region_tlb_multihit = {
  1755         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1756         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1757         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1758         (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
  1759         (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)tlb_multi_hit_read };
  1762 /* Store-queue regions */
  1763 /* These are a bit of a pain - the first 8 fields are controlled by SQMD, while 
  1764  * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
  1765  * some cases), in contrast to the ordinary fields above.
  1767  * There is probably a simpler way to do this.
  1768  */
  1770 struct mem_region_fn p4_region_storequeue = { 
  1771         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1772         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1773         unmapped_read_long, unmapped_write_long,
  1774         unmapped_read_burst, unmapped_write_burst,
  1775         ccn_storequeue_prefetch, unmapped_read_long }; 
  1777 struct mem_region_fn p4_region_storequeue_miss = { 
  1778         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1779         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1780         unmapped_read_long, unmapped_write_long,
  1781         unmapped_read_burst, unmapped_write_burst,
  1782         (mem_prefetch_fn_t)tlb_miss_read, unmapped_read_long }; 
  1784 struct mem_region_fn p4_region_storequeue_multihit = { 
  1785         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1786         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1787         unmapped_read_long, unmapped_write_long,
  1788         unmapped_read_burst, unmapped_write_burst,
  1789         (mem_prefetch_fn_t)tlb_multi_hit_read, unmapped_read_long }; 
  1791 struct mem_region_fn p4_region_storequeue_protected = {
  1792         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1793         unmapped_read_long, unmapped_write_long,
  1794         unmapped_read_long, unmapped_write_long,
  1795         unmapped_read_burst, unmapped_write_burst,
  1796         (mem_prefetch_fn_t)tlb_protected_read, unmapped_read_long };
  1798 struct mem_region_fn p4_region_storequeue_sqmd = {
  1799         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1800         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1801         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1802         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1803         (mem_prefetch_fn_t)address_error_read, (mem_read_fn_t)address_error_read_for_write };
  1805 struct mem_region_fn p4_region_storequeue_sqmd_miss = { 
  1806         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1807         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1808         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1809         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1810         (mem_prefetch_fn_t)tlb_miss_read, (mem_read_fn_t)address_error_read_for_write }; 
  1812 struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
  1813         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1814         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1815         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1816         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1817         (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)address_error_read_for_write };
  1819 struct mem_region_fn p4_region_storequeue_sqmd_protected = {
  1820         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1821         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1822         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1823         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1824         (mem_prefetch_fn_t)tlb_protected_read, (mem_read_fn_t)address_error_read_for_write };