lxdream.org :: lxdream/src/sh4/mmu.c
filename src/sh4/mmu.c
changeset 975:007bf7eb944f
prev 973:7434ac745eff
next 980:deb4361928fe
author nkeynes
date Mon Jan 26 07:26:24 2009 +0000 (15 years ago)
permissions -rw-r--r--
last change Add read_byte_for_write mem function for correct implementation of AND.B and friends
with TLB enabled.
Add read_byte and read_long MMIO stubs to do correct sign extension of IO reads
     1 /**
     2  * $Id$
     3  *
     4  * SH4 MMU implementation based on address space page maps. This module
     5  * is responsible for all address decoding functions. 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    19 #define MODULE sh4_module
    21 #include <stdio.h>
    22 #include <assert.h>
    23 #include "sh4/sh4mmio.h"
    24 #include "sh4/sh4core.h"
    25 #include "sh4/sh4trans.h"
    26 #include "dreamcast.h"
    27 #include "mem.h"
    28 #include "mmu.h"
    30 #define RAISE_TLB_ERROR(code, vpn) sh4_raise_tlb_exception(code, vpn)
    31 #define RAISE_MEM_ERROR(code, vpn) \
    32     MMIO_WRITE(MMU, TEA, vpn); \
    33     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    34     sh4_raise_exception(code);
    35 #define RAISE_TLB_MULTIHIT_ERROR(vpn) sh4_raise_tlb_multihit(vpn)
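/* Note: RAISE_MEM_ERROR expands to multiple statements (it latches the faulting
 * address into TEA and the VPN field of PTEH before raising the exception), so
 * it is only safe to use where a compound statement is legal. */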
    37 /* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
    38 #define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
    40 /* Primary address space (used directly by SH4 cores) */
    41 mem_region_fn_t *sh4_address_space;
    42 mem_region_fn_t *sh4_user_address_space;
    44 /* Accessed from the UTLB accessor methods */
    45 uint32_t mmu_urc;
    46 uint32_t mmu_urb;
    47 static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */  
    49 /* Module globals */
    50 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
    51 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
    52 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
    53 static uint32_t mmu_lrui;
    54 static uint32_t mmu_asid; // current asid
    55 static struct utlb_default_regions *mmu_user_storequeue_regions;
    57 /* Structures for 1K page handling */
    58 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
    59 static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
    60 static int mmu_utlb_1k_free_index;
    63 /* Function prototypes */
    64 static void mmu_invalidate_tlb();
    65 static void mmu_utlb_register_all();
    66 static void mmu_utlb_remove_entry(int);
    67 static void mmu_utlb_insert_entry(int);
    68 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    69 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    70 static void mmu_set_tlb_enabled( int tlb_on );
    71 static void mmu_set_tlb_asid( uint32_t asid );
    72 static void mmu_set_storequeue_protected( int protected, int tlb_on );
    73 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
    74 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
    75 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
    76 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
    77 static void mmu_utlb_1k_init();
    78 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
    79 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
    80 static int mmu_read_urc();
    82 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
    83 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
    84 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
    85 static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc );
    86 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
    87 static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc );
    88 static uint32_t get_tlb_size_mask( uint32_t flags );
    89 static uint32_t get_tlb_size_pages( uint32_t flags );
    91 #define DEFAULT_REGIONS 0
    92 #define DEFAULT_STOREQUEUE_REGIONS 1
    93 #define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
    95 static struct utlb_default_regions mmu_default_regions[3] = {
    96         { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
    97         { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
    98         { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
   100 #define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
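/* The table above supplies the tlb_miss/tlb_prot/tlb_multihit fallbacks used
 * when (un)mapping pages: index 0 for ordinary memory, index 1 for the store
 * queue area, and index 2 for the store queue with SQMD protection enabled. */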
   102 /*********************** Module public functions ****************************/
   104 /**
   105  * Allocate memory for the address space maps, and initialize them according
   106  * to the default (reset) values. (TLB is disabled by default)
   107  */
   109 void MMU_init()
   110 {
   111     sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   112     sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   113     mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   115     mmu_set_tlb_enabled(0);
   116     mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
   117     mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );                                
   119     /* Setup P4 tlb/cache access regions */
   120     mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   121     mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
   122     mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
   123     mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
   124     mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
   125     mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
   126     mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
   127     mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
   128     mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
   129     mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
   130     mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
   132     /* Setup P4 control region */
   133     mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
   134     mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
   135     mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
   136     mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
   137     mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
   138     mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
   139     mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
   140     mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
   141     mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
   142     mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
   143     mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
   144     mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
   145     mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
   147     register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
   148     mmu_utlb_1k_init();
    150     /* Ensure the code regions are executable (64-bit only). It might be
    151      * more portable to mmap these at runtime rather than using static decls.
   152      */
   153 #if SIZEOF_VOID_P == 8
   154     mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
   155     mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
   156 #endif
   157 }
   159 void MMU_reset()
   160 {
   161     mmio_region_MMU_write( CCR, 0 );
   162     mmio_region_MMU_write( MMUCR, 0 );
   163 }
   165 void MMU_save_state( FILE *f )
   166 {
   167     mmu_read_urc();   
   168     fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
   169     fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
   170     fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
   171     fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
   172     fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
   173     fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
   174 }
   176 int MMU_load_state( FILE *f )
   177 {
   178     if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
   179         return 1;
   180     }
   181     if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
   182         return 1;
   183     }
   184     if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
   185         return 1;
   186     }
    187     if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
   188         return 1;
   189     }
   190     if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
   191         return 1;
   192     }
   193     if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
   194         return 1;
   195     }
   197     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
   198     mmu_urc_overflow = mmu_urc >= mmu_urb;
   199     mmu_set_tlb_enabled(mmucr&MMUCR_AT);
   200     mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
   201     return 0;
   202 }
   204 /**
   205  * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
   206  * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
   207  */
   208 void MMU_ldtlb()
   209 {
   210     int urc = mmu_read_urc();
   211     if( mmu_utlb[urc].flags & TLB_VALID )
   212         mmu_utlb_remove_entry( urc );
   213     mmu_utlb[urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
   214     mmu_utlb[urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
   215     mmu_utlb[urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    216     mmu_utlb[urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
   217     mmu_utlb[urc].pcmcia = MMIO_READ(MMU, PTEA);
   218     mmu_utlb[urc].mask = get_tlb_size_mask(mmu_utlb[urc].flags);
   219     if( mmu_utlb[urc].flags & TLB_VALID )
   220         mmu_utlb_insert_entry( urc );
   221 }
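/* MMUCR reads below reassemble the live counters into the stored register
 * value: URC in bits 15:10, URB in bits 23:18 and LRUI in bits 31:26, matching
 * the SH7750 MMUCR field layout. */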
   224 MMIO_REGION_READ_FN( MMU, reg )
   225 {
   226     reg &= 0xFFF;
   227     switch( reg ) {
   228     case MMUCR:
   229         return MMIO_READ( MMU, MMUCR) | (mmu_read_urc()<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
   230     default:
   231         return MMIO_READ( MMU, reg );
   232     }
   233 }
   235 MMIO_REGION_READ_DEFSUBFNS(MMU)
   237 MMIO_REGION_WRITE_FN( MMU, reg, val )
   238 {
   239     uint32_t tmp;
   240     reg &= 0xFFF;
   241     switch(reg) {
   242     case SH4VER:
   243         return;
   244     case PTEH:
   245         val &= 0xFFFFFCFF;
   246         if( (val & 0xFF) != mmu_asid ) {
   247             mmu_set_tlb_asid( val&0xFF );
   248             sh4_icache.page_vma = -1; // invalidate icache as asid has changed
   249         }
   250         break;
   251     case PTEL:
   252         val &= 0x1FFFFDFF;
   253         break;
   254     case PTEA:
   255         val &= 0x0000000F;
   256         break;
   257     case TRA:
   258         val &= 0x000003FC;
   259         break;
   260     case EXPEVT:
   261     case INTEVT:
   262         val &= 0x00000FFF;
   263         break;
   264     case MMUCR:
   265         if( val & MMUCR_TI ) {
   266             mmu_invalidate_tlb();
   267         }
   268         mmu_urc = (val >> 10) & 0x3F;
   269         mmu_urb = (val >> 18) & 0x3F;
   270         if( mmu_urb == 0 ) {
   271             mmu_urb = 0x40;
   272         } else if( mmu_urc >= mmu_urb ) {
   273             mmu_urc_overflow = TRUE;
   274         }
   275         mmu_lrui = (val >> 26) & 0x3F;
   276         val &= 0x00000301;
   277         tmp = MMIO_READ( MMU, MMUCR );
   278         if( (val ^ tmp) & (MMUCR_SQMD) ) {
   279             mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
   280         }
   281         if( (val ^ tmp) & (MMUCR_AT) ) {
   282             // AT flag has changed state - flush the xlt cache as all bets
   283             // are off now. We also need to force an immediate exit from the
   284             // current block
   285             mmu_set_tlb_enabled( val & MMUCR_AT );
   286             MMIO_WRITE( MMU, MMUCR, val );
   287             sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
   288             xlat_flush_cache(); // If we're not running, flush the cache anyway
   289         }
   290         break;
   291     case CCR:
   292         CCN_set_cache_control( val );
   293         val &= 0x81A7;
   294         break;
   295     case MMUUNK1:
   296         /* Note that if the high bit is set, this appears to reset the machine.
   297          * Not emulating this behaviour yet until we know why...
   298          */
   299         val &= 0x00010007;
   300         break;
   301     case QACR0:
   302     case QACR1:
   303         val &= 0x0000001C;
   304         break;
   305     case PMCR1:
   306         PMM_write_control(0, val);
   307         val &= 0x0000C13F;
   308         break;
   309     case PMCR2:
   310         PMM_write_control(1, val);
   311         val &= 0x0000C13F;
   312         break;
   313     default:
   314         break;
   315     }
   316     MMIO_WRITE( MMU, reg, val );
   317 }
   319 /********************** 1K Page handling ***********************/
   320 /* Since we use 4K pages as our native page size, 1K pages need a bit of extra
   321  * effort to manage - we justify this on the basis that most programs won't
   322  * actually use 1K pages, so we may as well optimize for the common case.
   323  * 
   324  * Implementation uses an intermediate page entry (the utlb_1k_entry) that
   325  * redirects requests to the 'real' page entry. These are allocated on an
   326  * as-needed basis, and returned to the pool when all subpages are empty.
   327  */ 
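/* Illustrative example: a 1K TLB entry at 0x0C001400 shares the 4K address-space
 * slot at 0x0C001000 with up to three neighbouring 1K pages; the subpage index
 * used below is ((addr >> 10) & 0x03), which is 1 in this case. */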
   328 static void mmu_utlb_1k_init()
   329 {
   330     int i;
   331     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   332         mmu_utlb_1k_free_list[i] = i;
   333         mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
   334     }
   335     mmu_utlb_1k_free_index = 0;
   336 }
   338 static struct utlb_1k_entry *mmu_utlb_1k_alloc()
   339 {
   340     assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
   341     struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
   342     return entry;
   343 }    
   345 static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
   346 {
   347     unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
   348     assert( entryNo < UTLB_ENTRY_COUNT );
   349     assert( mmu_utlb_1k_free_index > 0 );
   350     mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
   351 }
   354 /********************** Address space maintenance *************************/
   356 /**
   357  * MMU accessor functions just increment URC - fixup here if necessary
   358  */
   359 static int mmu_read_urc()
   360 {
   361     if( mmu_urc_overflow ) {
   362         if( mmu_urc >= 0x40 ) {
   363             mmu_urc_overflow = FALSE;
   364             mmu_urc -= 0x40;
   365             mmu_urc %= mmu_urb;
   366         }
   367     } else {
   368         mmu_urc %= mmu_urb;
   369     }
   370     return mmu_urc;
   371 }
   373 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   374 {
   375     int count = (end - start) >> 12;
   376     mem_region_fn_t *ptr = &sh4_address_space[start>>12];
   377     while( count-- > 0 ) {
   378         *ptr++ = fn;
   379     }
   380 }
   381 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   382 {
   383     int count = (end - start) >> 12;
   384     mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
   385     while( count-- > 0 ) {
   386         *ptr++ = fn;
   387     }
   388 }
   390 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
   391 {
   392     int i;
   393     if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
   394         /* TLB on */
   395         sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
   396         sh4_address_space[(page|0xA0000000)>>12] = fn;
   397         /* Scan UTLB and update any direct-referencing entries */
   398     } else {
   399         /* Direct map to U0, P0, P1, P2, P3 */
   400         for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
   401             sh4_address_space[(page|i)>>12] = fn;
   402         }
   403         for( i=0; i < 0x80000000; i+= 0x20000000 ) {
   404             sh4_user_address_space[(page|i)>>12] = fn;
   405         }
   406     }
   407     return TRUE;
   408 }
   410 static void mmu_set_tlb_enabled( int tlb_on )
   411 {
   412     mem_region_fn_t *ptr, *uptr;
   413     int i;
   415     /* Reset the storequeue area */
   417     if( tlb_on ) {
   418         mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   419         mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
   420         mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   422         /* Default SQ prefetch goes to TLB miss (?) */
   423         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
   424         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   425         mmu_utlb_register_all();
   426     } else {
   427         for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   428             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   429         }
   430         for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   431             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   432         }
   434         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   435         if( IS_STOREQUEUE_PROTECTED() ) {
   436             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
   437         } else {
   438             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   439         }
   440     }
   442 }
   444 /**
   445  * Flip the SQMD switch - this is rather expensive, so will need to be changed if
   446  * anything expects to do this frequently.
   447  */
   448 static void mmu_set_storequeue_protected( int protected, int tlb_on ) 
   449 {
   450     mem_region_fn_t nontlb_region;
   451     int i;
   453     if( protected ) {
   454         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
   455         nontlb_region = &p4_region_storequeue_sqmd;
   456     } else {
   457         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   458         nontlb_region = &p4_region_storequeue; 
   459     }
   461     if( tlb_on ) {
   462         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   463         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   464             if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
   465                 mmu_utlb_insert_entry(i);
   466             }
   467         }
   468     } else {
   469         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region ); 
   470     }
   472 }
   474 static void mmu_set_tlb_asid( uint32_t asid )
   475 {
   476     /* Scan for pages that need to be remapped */
   477     int i;
   478     if( IS_SV_ENABLED() ) {
   479         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   480             if( mmu_utlb[i].asid == mmu_asid && 
   481                 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   482                 // Matches old ASID - unmap out
   483                 if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   484                         get_tlb_size_pages(mmu_utlb[i].flags) ) )
   485                     mmu_utlb_remap_pages( FALSE, TRUE, i );
   486             }
   487         }
   488         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   489             if( mmu_utlb[i].asid == asid && 
   490                 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   491                 // Matches new ASID - map in
   492                 mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn, 
   493                         mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   494                         get_tlb_size_pages(mmu_utlb[i].flags) );
   495             }
   496         }
   497     } else {
   498         // Remap both Priv+user pages
   499         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   500             if( mmu_utlb[i].asid == mmu_asid &&
   501                 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   502                 if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   503                         get_tlb_size_pages(mmu_utlb[i].flags) ) )
   504                     mmu_utlb_remap_pages( TRUE, TRUE, i );
   505             }
   506         }
   507         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   508             if( mmu_utlb[i].asid == asid &&
   509                 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   510                 mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn, 
   511                         mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   512                         get_tlb_size_pages(mmu_utlb[i].flags) );  
   513             }
   514         }
   515     }
   517     mmu_asid = asid;
   518 }
   520 static uint32_t get_tlb_size_mask( uint32_t flags )
   521 {
   522     switch( flags & TLB_SIZE_MASK ) {
   523     case TLB_SIZE_1K: return MASK_1K;
   524     case TLB_SIZE_4K: return MASK_4K;
   525     case TLB_SIZE_64K: return MASK_64K;
   526     case TLB_SIZE_1M: return MASK_1M;
   527     default: return 0; /* Unreachable */
   528     }
   529 }
   530 static uint32_t get_tlb_size_pages( uint32_t flags )
   531 {
   532     switch( flags & TLB_SIZE_MASK ) {
   533     case TLB_SIZE_1K: return 0;
   534     case TLB_SIZE_4K: return 1;
   535     case TLB_SIZE_64K: return 16;
   536     case TLB_SIZE_1M: return 256;
   537     default: return 0; /* Unreachable */
   538     }
   539 }
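/* A return value of 0 (a 1K page) is treated specially throughout this file:
 * mmu_utlb_map_pages() and mmu_utlb_unmap_pages() interpret npages == 0 as a
 * request to operate on a single 1K subpage rather than on whole 4K pages. */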
   541 /**
   542  * Add a new TLB entry mapping to the address space table. If any of the pages
   543  * are already mapped, they are mapped to the TLB multi-hit page instead.
   544  * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
   545  */ 
   546 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
   547 {
   548     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   549     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   550     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   551     struct utlb_default_regions *userdefs = privdefs;    
   553     gboolean mapping_ok = TRUE;
   554     int i;
   556     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   557         /* Storequeue mapping */
   558         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   559         userdefs = mmu_user_storequeue_regions;
   560     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   561         user_page = NULL; /* No user access to P3 region */
   562     } else if( start_addr >= 0x80000000 ) {
   563         return TRUE; // No mapping - legal but meaningless
   564     }
   566     if( npages == 0 ) {
   567         struct utlb_1k_entry *ent;
   568         int i, idx = (start_addr >> 10) & 0x03;
   569         if( IS_1K_PAGE_ENTRY(*ptr) ) {
   570             ent = (struct utlb_1k_entry *)*ptr;
   571         } else {
   572             ent = mmu_utlb_1k_alloc();
   573             /* New 1K struct - init to previous contents of region */
   574             for( i=0; i<4; i++ ) {
   575                 ent->subpages[i] = *ptr;
   576                 ent->user_subpages[i] = *uptr;
   577             }
   578             *ptr = &ent->fn;
   579             *uptr = &ent->user_fn;
   580         }
   582         if( priv_page != NULL ) {
   583             if( ent->subpages[idx] == privdefs->tlb_miss ) {
   584                 ent->subpages[idx] = priv_page;
   585             } else {
   586                 mapping_ok = FALSE;
   587                 ent->subpages[idx] = privdefs->tlb_multihit;
   588             }
   589         }
   590         if( user_page != NULL ) {
   591             if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
   592                 ent->user_subpages[idx] = user_page;
   593             } else {
   594                 mapping_ok = FALSE;
   595                 ent->user_subpages[idx] = userdefs->tlb_multihit;
   596             }
   597         }
   599     } else {
   600         if( priv_page != NULL ) {
   601             /* Privileged mapping only */
   602             for( i=0; i<npages; i++ ) {
   603                 if( *ptr == privdefs->tlb_miss ) {
   604                     *ptr++ = priv_page;
   605                 } else {
   606                     mapping_ok = FALSE;
   607                     *ptr++ = privdefs->tlb_multihit;
   608                 }
   609             }
   610         }
   611         if( user_page != NULL ) {
   612             /* User mapping only (eg ASID change remap w/ SV=1) */
   613             for( i=0; i<npages; i++ ) {
   614                 if( *uptr == userdefs->tlb_miss ) {
   615                     *uptr++ = user_page;
   616                 } else {
   617                     mapping_ok = FALSE;
   618                     *uptr++ = userdefs->tlb_multihit;
   619                 }
   620             }        
   621         }
   622     }
   624     return mapping_ok;
   625 }
   627 /**
   628  * Remap any pages within the region covered by entryNo, but not including 
   629  * entryNo itself. This is used to reestablish pages that were previously
   630  * covered by a multi-hit exception region when one of the pages is removed.
   631  */
   632 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
   633 {
   634     int mask = mmu_utlb[entryNo].mask;
   635     uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
   636     int i;
   638     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   639         if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
   640             /* Overlapping region */
   641             mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
    642             mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
   643             uint32_t start_addr;
   644             int npages;
   646             if( mmu_utlb[i].mask >= mask ) {
   647                 /* entry is no larger than the area we're replacing - map completely */
   648                 start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
   649                 npages = get_tlb_size_pages( mmu_utlb[i].flags );
   650             } else {
   651                 /* Otherwise map subset - region covered by removed page */
   652                 start_addr = remap_addr;
   653                 npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
   654             }
   656             if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) { 
   657                 mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
   658             } else if( IS_SV_ENABLED() ) {
   659                 mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
   660             }
   662         }
   663     }
   664 }
   666 /**
    667  * Remove a previous TLB mapping (replacing it with the TLB miss region).
   668  * @return FALSE if any pages were previously mapped to the TLB multihit page, 
   669  * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
   670  */
   671 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
   672 {
   673     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   674     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   675     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   676     struct utlb_default_regions *userdefs = privdefs;
   678     gboolean unmapping_ok = TRUE;
   679     int i;
   681     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   682         /* Storequeue mapping */
   683         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   684         userdefs = mmu_user_storequeue_regions;
   685     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   686         unmap_user = FALSE;
   687     } else if( start_addr >= 0x80000000 ) {
   688         return TRUE; // No mapping - legal but meaningless
   689     }
   691     if( npages == 0 ) { // 1K page
   692         assert( IS_1K_PAGE_ENTRY( *ptr ) );
   693         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
   694         int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
   695         if( ent->subpages[idx] == privdefs->tlb_multihit ) {
   696             unmapping_ok = FALSE;
   697         }
   698         if( unmap_priv )
   699             ent->subpages[idx] = privdefs->tlb_miss;
   700         if( unmap_user )
   701             ent->user_subpages[idx] = userdefs->tlb_miss;
   703         /* If all 4 subpages have the same content, merge them together and
   704          * release the 1K entry
   705          */
   706         mem_region_fn_t priv_page = ent->subpages[0];
   707         mem_region_fn_t user_page = ent->user_subpages[0];
   708         for( i=1; i<4; i++ ) {
   709             if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
   710                 mergeable = 0;
   711                 break;
   712             }
   713         }
   714         if( mergeable ) {
   715             mmu_utlb_1k_free(ent);
   716             *ptr = priv_page;
   717             *uptr = user_page;
   718         }
   719     } else {
   720         if( unmap_priv ) {
   721             /* Privileged (un)mapping */
   722             for( i=0; i<npages; i++ ) {
   723                 if( *ptr == privdefs->tlb_multihit ) {
   724                     unmapping_ok = FALSE;
   725                 }
   726                 *ptr++ = privdefs->tlb_miss;
   727             }
   728         }
   729         if( unmap_user ) {
   730             /* User (un)mapping */
   731             for( i=0; i<npages; i++ ) {
   732                 if( *uptr == userdefs->tlb_multihit ) {
   733                     unmapping_ok = FALSE;
   734                 }
   735                 *uptr++ = userdefs->tlb_miss;
   736             }            
   737         }
   738     }
   740     return unmapping_ok;
   741 }
   743 static void mmu_utlb_insert_entry( int entry )
   744 {
   745     struct utlb_entry *ent = &mmu_utlb[entry];
   746     mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
   747     mem_region_fn_t upage;
   748     sh4addr_t start_addr = ent->vpn & ent->mask;
   749     int npages = get_tlb_size_pages(ent->flags);
   751     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   752         /* Store queue mappings are a bit different - normal access is fixed to
   753          * the store queue register block, and we only map prefetches through
   754          * the TLB 
   755          */
   756         mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
   758         if( (ent->flags & TLB_USERMODE) == 0 ) {
   759             upage = mmu_user_storequeue_regions->tlb_prot;
   760         } else if( IS_STOREQUEUE_PROTECTED() ) {
   761             upage = &p4_region_storequeue_sqmd;
   762         } else {
   763             upage = page;
   764         }
   766     }  else {
   768         if( (ent->flags & TLB_USERMODE) == 0 ) {
   769             upage = &mem_region_tlb_protected;
   770         } else {        
   771             upage = page;
   772         }
   774         if( (ent->flags & TLB_WRITABLE) == 0 ) {
   775             page->write_long = (mem_write_fn_t)tlb_protected_write;
   776             page->write_word = (mem_write_fn_t)tlb_protected_write;
   777             page->write_byte = (mem_write_fn_t)tlb_protected_write;
   778             page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
   779             page->read_byte_for_write = (mem_read_fn_t)tlb_protected_read_for_write;
   780             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   781         } else if( (ent->flags & TLB_DIRTY) == 0 ) {
   782             page->write_long = (mem_write_fn_t)tlb_initial_write;
   783             page->write_word = (mem_write_fn_t)tlb_initial_write;
   784             page->write_byte = (mem_write_fn_t)tlb_initial_write;
   785             page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
   786             page->read_byte_for_write = (mem_read_fn_t)tlb_initial_read_for_write;
   787             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   788         } else {
   789             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
   790         }
   791     }
   793     mmu_utlb_pages[entry].user_fn = upage;
   795     /* Is page visible? */
   796     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) { 
   797         mmu_utlb_map_pages( page, upage, start_addr, npages );
   798     } else if( IS_SV_ENABLED() ) {
   799         mmu_utlb_map_pages( page, NULL, start_addr, npages );
   800     }
   801 }
   803 static void mmu_utlb_remove_entry( int entry )
   804 {
   805     int i, j;
   806     struct utlb_entry *ent = &mmu_utlb[entry];
   807     sh4addr_t start_addr = ent->vpn&ent->mask;
   808     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   809     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   810     gboolean unmap_user;
   811     int npages = get_tlb_size_pages(ent->flags);
   813     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
   814         unmap_user = TRUE;
   815     } else if( IS_SV_ENABLED() ) {
   816         unmap_user = FALSE;
   817     } else {
   818         return; // Not mapped
   819     }
   821     gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
   823     if( !clean_unmap ) {
   824         mmu_utlb_remap_pages( TRUE, unmap_user, entry );
   825     }
   826 }
   828 static void mmu_utlb_register_all()
   829 {
   830     int i;
   831     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   832         if( mmu_utlb[i].flags & TLB_VALID ) 
   833             mmu_utlb_insert_entry( i );
   834     }
   835 }
   837 static void mmu_invalidate_tlb()
   838 {
   839     int i;
   840     for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
   841         mmu_itlb[i].flags &= (~TLB_VALID);
   842     }
   843     if( IS_TLB_ENABLED() ) {
   844         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   845             if( mmu_utlb[i].flags & TLB_VALID ) {
   846                 mmu_utlb_remove_entry( i );
   847             }
   848         }
   849     }
   850     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   851         mmu_utlb[i].flags &= (~TLB_VALID);
   852     }
   853 }
   855 /******************************************************************************/
   856 /*                        MMU TLB address translation                         */
   857 /******************************************************************************/
   859 /**
   860  * Translate a 32-bit address into a UTLB entry number. Does not check for
   861  * page protection etc.
   862  * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
   863  */
   864 int mmu_utlb_entry_for_vpn( uint32_t vpn )
   865 {
   866     mmu_urc++;
   867     mem_region_fn_t fn = sh4_address_space[vpn>>12];
   868     if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   869         return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   870     } else if( fn >= &mmu_utlb_1k_pages[0].fn && fn < &mmu_utlb_1k_pages[UTLB_ENTRY_COUNT].fn ) {
   871         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)fn;
   872         fn = ent->subpages[(vpn>>10)&0x03];
   873         if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   874             return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   875         }            
   876     } else if( fn == &mem_region_tlb_multihit ) {
   877         return -2;
   878     } else {
   879         return -1;
   880     }
   881 }
   884 /**
   885  * Perform the actual utlb lookup w/ asid matching.
    886  * Possible outcomes are:
   887  *   0..63 Single match - good, return entry found
   888  *   -1 No match - raise a tlb data miss exception
   889  *   -2 Multiple matches - raise a multi-hit exception (reset)
   890  * @param vpn virtual address to resolve
   891  * @return the resultant UTLB entry, or an error.
   892  */
   893 static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
   894 {
   895     int result = -1;
   896     unsigned int i;
   898     mmu_urc++;
   899     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   900         mmu_urc = 0;
   901     }
   903     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   904         if( (mmu_utlb[i].flags & TLB_VALID) &&
   905                 ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
   906                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   907             if( result != -1 ) {
   908                 return -2;
   909             }
   910             result = i;
   911         }
   912     }
   913     return result;
   914 }
   916 /**
   917  * Perform the actual utlb lookup matching on vpn only
    918  * Possible outcomes are:
   919  *   0..63 Single match - good, return entry found
   920  *   -1 No match - raise a tlb data miss exception
   921  *   -2 Multiple matches - raise a multi-hit exception (reset)
   922  * @param vpn virtual address to resolve
   923  * @return the resultant UTLB entry, or an error.
   924  */
   925 static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
   926 {
   927     int result = -1;
   928     unsigned int i;
   930     mmu_urc++;
   931     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   932         mmu_urc = 0;
   933     }
   935     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   936         if( (mmu_utlb[i].flags & TLB_VALID) &&
   937                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   938             if( result != -1 ) {
   939                 return -2;
   940             }
   941             result = i;
   942         }
   943     }
   945     return result;
   946 }
   948 /**
   949  * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
   950  * @return the number (0-3) of the replaced entry.
   951  */
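/* LRUI is the 6-bit pseudo-LRU field from MMUCR: each bit records the relative
 * age of one pair of ITLB entries, and the bit patterns tested below are the
 * replacement conditions described in the SH7750 hardware manual. */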
   952 static int inline mmu_itlb_update_from_utlb( int entryNo )
   953 {
   954     int replace;
   955     /* Determine entry to replace based on lrui */
   956     if( (mmu_lrui & 0x38) == 0x38 ) {
   957         replace = 0;
   958         mmu_lrui = mmu_lrui & 0x07;
   959     } else if( (mmu_lrui & 0x26) == 0x06 ) {
   960         replace = 1;
   961         mmu_lrui = (mmu_lrui & 0x19) | 0x20;
   962     } else if( (mmu_lrui & 0x15) == 0x01 ) {
   963         replace = 2;
   964         mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
   965     } else { // Note - gets invalid entries too
   966         replace = 3;
   967         mmu_lrui = (mmu_lrui | 0x0B);
   968     }
   970     mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
   971     mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
   972     mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
   973     mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
   974     mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
   975     return replace;
   976 }
   978 /**
   979  * Perform the actual itlb lookup w/ asid protection
    980  * Possible outcomes are:
   981  *   0..63 Single match - good, return entry found
   982  *   -1 No match - raise a tlb data miss exception
   983  *   -2 Multiple matches - raise a multi-hit exception (reset)
   984  * @param vpn virtual address to resolve
   985  * @return the resultant ITLB entry, or an error.
   986  */
   987 static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
   988 {
   989     int result = -1;
   990     unsigned int i;
   992     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   993         if( (mmu_itlb[i].flags & TLB_VALID) &&
   994                 ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
   995                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   996             if( result != -1 ) {
   997                 return -2;
   998             }
    999             result = i;
   1000         }
   1001     }
  1003     if( result == -1 ) {
  1004         int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
  1005         if( utlbEntry < 0 ) {
  1006             return utlbEntry;
  1007         } else {
   1008             return mmu_itlb_update_from_utlb( utlbEntry );
   1009         }
   1010     }
  1012     switch( result ) {
  1013     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
  1014     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
  1015     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
   1016     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   1017     }
   1019     return result;
   1020 }
  1022 /**
  1023  * Perform the actual itlb lookup on vpn only
   1024  * Possible outcomes are:
  1025  *   0..63 Single match - good, return entry found
  1026  *   -1 No match - raise a tlb data miss exception
  1027  *   -2 Multiple matches - raise a multi-hit exception (reset)
  1028  * @param vpn virtual address to resolve
  1029  * @return the resultant ITLB entry, or an error.
  1030  */
   1031 static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
   1032 {
  1033     int result = -1;
  1034     unsigned int i;
  1036     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1037         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1038                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1039             if( result != -1 ) {
   1040                 return -2;
   1041             }
   1042             result = i;
   1043         }
   1044     }
  1046     if( result == -1 ) {
  1047         int utlbEntry = mmu_utlb_lookup_vpn( vpn );
  1048         if( utlbEntry < 0 ) {
  1049             return utlbEntry;
  1050         } else {
   1051             return mmu_itlb_update_from_utlb( utlbEntry );
   1052         }
   1053     }
  1055     switch( result ) {
  1056     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
  1057     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
  1058     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
   1059     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   1060     }
   1062     return result;
   1063 }
  1065 /**
  1066  * Update the icache for an untranslated address
  1067  */
   1068 static inline void mmu_update_icache_phys( sh4addr_t addr )
   1069 {
  1070     if( (addr & 0x1C000000) == 0x0C000000 ) {
  1071         /* Main ram */
  1072         sh4_icache.page_vma = addr & 0xFF000000;
  1073         sh4_icache.page_ppa = 0x0C000000;
  1074         sh4_icache.mask = 0xFF000000;
  1075         sh4_icache.page = dc_main_ram;
  1076     } else if( (addr & 0x1FE00000) == 0 ) {
  1077         /* BIOS ROM */
  1078         sh4_icache.page_vma = addr & 0xFFE00000;
  1079         sh4_icache.page_ppa = 0;
  1080         sh4_icache.mask = 0xFFE00000;
  1081         sh4_icache.page = dc_boot_rom;
  1082     } else {
  1083         /* not supported */
   1084         sh4_icache.page_vma = -1;
   1085     }
   1086 }
  1088 /**
  1089  * Update the sh4_icache structure to describe the page(s) containing the
  1090  * given vma. If the address does not reference a RAM/ROM region, the icache
  1091  * will be invalidated instead.
  1092  * If AT is on, this method will raise TLB exceptions normally
  1093  * (hence this method should only be used immediately prior to execution of
  1094  * code), and otherwise will set the icache according to the matching TLB entry.
  1095  * If AT is off, this method will set the entire referenced RAM/ROM region in
  1096  * the icache.
  1097  * @return TRUE if the update completed (successfully or otherwise), FALSE
  1098  * if an exception was raised.
  1099  */
   1100 gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
   1101 {
  1102     int entryNo;
  1103     if( IS_SH4_PRIVMODE()  ) {
  1104         if( addr & 0x80000000 ) {
  1105             if( addr < 0xC0000000 ) {
  1106                 /* P1, P2 and P4 regions are pass-through (no translation) */
  1107                 mmu_update_icache_phys(addr);
  1108                 return TRUE;
  1109             } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
  1110                 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1111                 return FALSE;
   1112             }
   1113         }
  1115         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1116         if( (mmucr & MMUCR_AT) == 0 ) {
  1117             mmu_update_icache_phys(addr);
   1118             return TRUE;
   1119         }
  1121         if( (mmucr & MMUCR_SV) == 0 )
  1122         	entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1123         else
  1124         	entryNo = mmu_itlb_lookup_vpn( addr );
  1125     } else {
  1126         if( addr & 0x80000000 ) {
  1127             RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1128             return FALSE;
   1129         }
  1131         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1132         if( (mmucr & MMUCR_AT) == 0 ) {
  1133             mmu_update_icache_phys(addr);
   1134             return TRUE;
   1135         }
  1137         entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1139         if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
  1140             RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1141             return FALSE;
   1142         }
   1143     }
  1145     switch(entryNo) {
  1146     case -1:
  1147     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1148     return FALSE;
  1149     case -2:
  1150     RAISE_TLB_MULTIHIT_ERROR(addr);
  1151     return FALSE;
  1152     default:
  1153         sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
  1154         sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
  1155         if( sh4_icache.page == NULL ) {
  1156             sh4_icache.page_vma = -1;
  1157         } else {
  1158             sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
   1159             sh4_icache.mask = mmu_itlb[entryNo].mask;
   1160         }
   1161         return TRUE;
   1162     }
   1163 }
  1165 /**
  1166  * Translate address for disassembly purposes (ie performs an instruction
  1167  * lookup) - does not raise exceptions or modify any state, and ignores
  1168  * protection bits. Returns the translated address, or MMU_VMA_ERROR
  1169  * on translation failure.
  1170  */
   1171 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
   1172 {
  1173     if( vma & 0x80000000 ) {
  1174         if( vma < 0xC0000000 ) {
  1175             /* P1, P2 and P4 regions are pass-through (no translation) */
  1176             return VMA_TO_EXT_ADDR(vma);
  1177         } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
  1178             /* Not translatable */
   1179             return MMU_VMA_ERROR;
   1180         }
   1181     }
  1183     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1184     if( (mmucr & MMUCR_AT) == 0 ) {
   1185         return VMA_TO_EXT_ADDR(vma);
   1186     }
  1188     int entryNo = mmu_itlb_lookup_vpn( vma );
  1189     if( entryNo == -2 ) {
   1190         entryNo = mmu_itlb_lookup_vpn_asid( vma );
   1191     }
  1192     if( entryNo < 0 ) {
  1193         return MMU_VMA_ERROR;
  1194     } else {
  1195         return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
   1196         (vma & (~mmu_itlb[entryNo].mask));
   1197     }
   1198 }
  1200 /********************** TLB Direct-Access Regions ***************************/
  1201 #ifdef HAVE_FRAME_ADDRESS
  1202 #define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; } while(0)
  1203 #else
  1204 #define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
  1205 #endif
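/* When HAVE_FRAME_ADDRESS is available, EXCEPTION_EXIT() rewrites the caller's
 * return address on the stack with 'exc', so the faulting handler appears to
 * return to the exception exit point supplied by the caller rather than to the
 * original call site; otherwise it falls back to a full sh4_core_exit(). */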
  1208 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
   1210 int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
   1211 {
   1212     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1213     return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
   1214 }
   1216 void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
   1217 {
   1218     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1219     ent->vpn = val & 0xFFFFFC00;
   1220     ent->asid = val & 0x000000FF;
   1221     ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
   1222 }
   1224 int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
   1225 {
   1226     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1227     return (ent->ppn & 0x1FFFFC00) | ent->flags;
   1228 }
   1230 void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
   1231 {
   1232     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1233     ent->ppn = val & 0x1FFFFC00;
   1234     ent->flags = val & 0x00001DA;
   1235     ent->mask = get_tlb_size_mask(val);
   1236     if( ent->ppn >= 0x1C000000 )
   1237         ent->ppn |= 0xE0000000;
   1238 }
  1240 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
  1241 #define UTLB_ASSOC(addr) (addr&0x80)
  1242 #define UTLB_DATA2(addr) (addr&0x00800000)
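/* P4 UTLB array decoding: address bits 13:8 select one of the 64 entries, bit 7
 * requests an associative (lookup-based) access, and bit 23 selects data array 2,
 * which is backed here by the entry's PCMCIA field. */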
   1244 int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
   1245 {
   1246     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   1247     return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
   1248     ((ent->flags & TLB_DIRTY)<<7);
   1249 }
   1250 int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
   1251 {
   1252     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   1253     if( UTLB_DATA2(addr) ) {
   1254         return ent->pcmcia;
   1255     } else {
   1256         return (ent->ppn&0x1FFFFC00) | ent->flags;
   1257     }
   1258 }
  1260 /**
  1261  * Find a UTLB entry for the associative TLB write - same as the normal
  1262  * lookup but ignores the valid bit.
  1263  */
   1264 static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   1265 {
   1266     int result = -1;
   1267     unsigned int i;
   1268     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   1269         if( (mmu_utlb[i].flags & TLB_VALID) &&
   1270                 ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
   1271                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   1272             if( result != -1 ) {
   1273                 fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
   1274                 return -2;
   1275             }
   1276             result = i;
   1277         }
   1278     }
   1279     return result;
   1280 }
  1282 /**
  1283  * Find a ITLB entry for the associative TLB write - same as the normal
  1284  * lookup but ignores the valid bit.
  1285  */
   1286 static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   1287 {
   1288     int result = -1;
   1289     unsigned int i;
   1290     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   1291         if( (mmu_itlb[i].flags & TLB_VALID) &&
   1292                 ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
   1293                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   1294             if( result != -1 ) {
   1295                 return -2;
   1296             }
   1297             result = i;
   1298         }
   1299     }
   1300     return result;
   1301 }
   1303 void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
   1304 {
  1305     if( UTLB_ASSOC(addr) ) {
  1306         int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
  1307         if( utlb >= 0 ) {
  1308             struct utlb_entry *ent = &mmu_utlb[utlb];
  1309             uint32_t old_flags = ent->flags;
  1310             ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
  1311             ent->flags |= (val & TLB_VALID);
  1312             ent->flags |= ((val & 0x200)>>7);
  1313             if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
  1314                 if( old_flags & TLB_VALID )
  1315                     mmu_utlb_remove_entry( utlb );
  1316                 if( ent->flags & TLB_VALID )
   1317                     mmu_utlb_insert_entry( utlb );
   1318             }
   1319         }
  1321         int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
  1322         if( itlb >= 0 ) {
  1323             struct itlb_entry *ent = &mmu_itlb[itlb];
   1324             ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
   1325         }
  1327         if( itlb == -2 || utlb == -2 ) {
  1328             RAISE_TLB_MULTIHIT_ERROR(addr);
  1329             EXCEPTION_EXIT();
   1330             return;
   1331         }
  1332     } else {
  1333         struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1334         if( ent->flags & TLB_VALID ) 
  1335             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1336         ent->vpn = (val & 0xFFFFFC00);
  1337         ent->asid = (val & 0xFF);
  1338         ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
  1339         ent->flags |= (val & TLB_VALID);
  1340         ent->flags |= ((val & 0x200)>>7);
  1341         if( ent->flags & TLB_VALID ) 
   1342             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   1343     }
   1344 }
   1346 void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
   1347 {
  1348     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1349     if( UTLB_DATA2(addr) ) {
  1350         ent->pcmcia = val & 0x0000000F;
  1351     } else {
  1352         if( ent->flags & TLB_VALID ) 
  1353             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1354         ent->ppn = (val & 0x1FFFFC00);
  1355         ent->flags = (val & 0x000001FF);
  1356         ent->mask = get_tlb_size_mask(val);
  1357         if( ent->flags & TLB_VALID ) 
   1358             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   1359     }
   1360 }
  1362 struct mem_region_fn p4_region_itlb_addr = {
  1363         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1364         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1365         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1366         unmapped_read_burst, unmapped_write_burst,
  1367         unmapped_prefetch, mmu_itlb_addr_read };
  1368 struct mem_region_fn p4_region_itlb_data = {
  1369         mmu_itlb_data_read, mmu_itlb_data_write,
  1370         mmu_itlb_data_read, mmu_itlb_data_write,
  1371         mmu_itlb_data_read, mmu_itlb_data_write,
  1372         unmapped_read_burst, unmapped_write_burst,
  1373         unmapped_prefetch, mmu_itlb_data_read };
  1374 struct mem_region_fn p4_region_utlb_addr = {
  1375         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1376         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1377         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1378         unmapped_read_burst, unmapped_write_burst,
  1379         unmapped_prefetch, mmu_utlb_addr_read };
  1380 struct mem_region_fn p4_region_utlb_data = {
  1381         mmu_utlb_data_read, mmu_utlb_data_write,
  1382         mmu_utlb_data_read, mmu_utlb_data_write,
  1383         mmu_utlb_data_read, mmu_utlb_data_write,
  1384         unmapped_read_burst, unmapped_write_burst,
  1385         unmapped_prefetch, mmu_utlb_data_read };
  1387 /********************** Error regions **************************/
   1389 static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
   1390 {
   1391     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1392     EXCEPTION_EXIT();
   1393 }
   1395 static void FASTCALL address_error_read_for_write( sh4addr_t addr, void *exc )
   1396 {
   1397     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
   1398     EXCEPTION_EXIT();
   1399 }
   1401 static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1402 {
   1403     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1404     EXCEPTION_EXIT();
   1405 }
   1407 static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
   1408 {
   1409     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
   1410     EXCEPTION_EXIT();
   1411 }
  1413 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
  1414 {
  1415     mmu_urc++;
  1416     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1417     EXCEPTION_EXIT();
  1418 }
  1420 static void FASTCALL tlb_miss_read_for_write( sh4addr_t addr, void *exc )
  1421 {
  1422     mmu_urc++;
  1423     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
  1424     EXCEPTION_EXIT();
  1425 }
  1427 static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1428 {
  1429     mmu_urc++;
  1430     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1431     EXCEPTION_EXIT();
  1432 }
  1434 static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
  1435 {
  1436     mmu_urc++;
  1437     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
  1438     EXCEPTION_EXIT();
  1439 }
  1441 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
  1442 {
  1443     mmu_urc++;
  1444     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1445     EXCEPTION_EXIT();
  1446     return 0;
  1447 }
  1449 static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc )
  1450 {
  1451     mmu_urc++;
  1452     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
  1453     EXCEPTION_EXIT();
  1454     return 0;
  1455 }
  1457 static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1458 {
  1459     mmu_urc++;
  1460     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1461     EXCEPTION_EXIT();
  1462     return 0;
  1463 }
  1465 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
  1466 {
  1467     mmu_urc++;
  1468     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
  1469     EXCEPTION_EXIT();
  1470 }
  1472 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
  1473 {
  1474     mmu_urc++;
  1475     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
  1476     EXCEPTION_EXIT();
  1477 }
  1479 static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc )
  1480 {
  1481     mmu_urc++;
  1482     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
  1483     EXCEPTION_EXIT();
  1484     return 0;
  1485 }
  1487 static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
  1488 {
  1489     sh4_raise_tlb_multihit(addr);
  1490     EXCEPTION_EXIT();
  1491     return 0;
  1492 }
  1494 static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1495 {
  1496     sh4_raise_tlb_multihit(addr);
  1497     EXCEPTION_EXIT();
  1498     return 0;
  1499 }
  1500 static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
  1501 {
  1502     sh4_raise_tlb_multihit(addr);
  1503     EXCEPTION_EXIT();
  1504 }
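/* Summary note (hedged): each handler above follows the same shape - take the
 * faulting address (plus the trailing void *exc argument supplied by the
 * caller), raise the corresponding SH4 exception, and leave via
 * EXCEPTION_EXIT() so control transfers to the exception path instead of
 * returning to the interrupted access. The exact unwinding performed by
 * EXCEPTION_EXIT() is defined elsewhere and is assumed, not shown, here.
 */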
  1506 /**
  1507  * Note: Per sec 4.6.4 of the SH7750 manual, SQ 
  1508  */
  1509 struct mem_region_fn mem_region_address_error = {
  1510         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1511         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1512         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1513         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1514         unmapped_prefetch, (mem_read_fn_t)address_error_read_for_write };
  1516 struct mem_region_fn mem_region_tlb_miss = {
  1517         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1518         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1519         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1520         (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
  1521         unmapped_prefetch, (mem_read_fn_t)tlb_miss_read_for_write };
  1523 struct mem_region_fn mem_region_tlb_protected = {
  1524         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1525         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1526         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1527         (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
  1528         unmapped_prefetch, (mem_read_fn_t)tlb_protected_read_for_write };
  1530 struct mem_region_fn mem_region_tlb_multihit = {
  1531         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1532         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1533         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1534         (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
  1535         (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)tlb_multi_hit_read };
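/* Usage note (hedged; the wiring is done by other parts of this module): these
 * four tables let the page map express a faulting mapping directly. A page with
 * no UTLB match, a protection violation, or a multiple hit can have its region
 * pointer set to the matching table, so that e.g.
 *
 *     fn->read_long( addr );   // with fn == &mem_region_tlb_miss
 *
 * lands in tlb_miss_read() and raises EXC_TLB_MISS_READ for that address, with
 * no per-access "is this page mapped?" test on the fast path.
 */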
  1538 /* Store-queue regions */
  1539 /* These are a bit of a pain - the first 8 fields are controlled by SQMD, while 
  1540  * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
  1541  * some cases), in contrast to the ordinary fields above.
  1543  * There is probably a simpler way to do this.
  1544  */
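/* Selection sketch (hedged; MMUCR.SQMD handling and the TLB lookup live
 * elsewhere in this module - sqmd_restricted and tlb_miss below are
 * hypothetical flags standing in for that state): the store-queue table used
 * for a user-mode page might be chosen along these lines:
 *
 *     struct mem_region_fn *sq;
 *     if( sqmd_restricted )        // SQMD=1: user access to the SQ area faults
 *         sq = tlb_miss ? &p4_region_storequeue_sqmd_miss : &p4_region_storequeue_sqmd;
 *     else                         // SQMD=0: user access allowed
 *         sq = tlb_miss ? &p4_region_storequeue_miss : &p4_region_storequeue;
 */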
  1546 struct mem_region_fn p4_region_storequeue = { 
  1547         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1548         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1549         unmapped_read_long, unmapped_write_long,
  1550         unmapped_read_burst, unmapped_write_burst,
  1551         ccn_storequeue_prefetch, unmapped_read_long }; 
  1553 struct mem_region_fn p4_region_storequeue_miss = { 
  1554         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1555         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1556         unmapped_read_long, unmapped_write_long,
  1557         unmapped_read_burst, unmapped_write_burst,
  1558         (mem_prefetch_fn_t)tlb_miss_read, unmapped_read_long }; 
  1560 struct mem_region_fn p4_region_storequeue_multihit = { 
  1561         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1562         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1563         unmapped_read_long, unmapped_write_long,
  1564         unmapped_read_burst, unmapped_write_burst,
  1565         (mem_prefetch_fn_t)tlb_multi_hit_read, unmapped_read_long }; 
  1567 struct mem_region_fn p4_region_storequeue_protected = {
  1568         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1569         unmapped_read_long, unmapped_write_long,
  1570         unmapped_read_long, unmapped_write_long,
  1571         unmapped_read_burst, unmapped_write_burst,
  1572         (mem_prefetch_fn_t)tlb_protected_read, unmapped_read_long };
  1574 struct mem_region_fn p4_region_storequeue_sqmd = {
  1575         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1576         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1577         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1578         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1579         (mem_prefetch_fn_t)address_error_read, (mem_read_fn_t)address_error_read_for_write };
  1581 struct mem_region_fn p4_region_storequeue_sqmd_miss = { 
  1582         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1583         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1584         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1585         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1586         (mem_prefetch_fn_t)tlb_miss_read, (mem_read_fn_t)address_error_read_for_write }; 
  1588 struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
  1589         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1590         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1591         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1592         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1593         (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)address_error_read_for_write };
  1595 struct mem_region_fn p4_region_storequeue_sqmd_protected = {
  1596         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1597         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1598         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1599         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1600         (mem_prefetch_fn_t)tlb_protected_read, (mem_read_fn_t)address_error_read_for_write };