lxdream.org :: lxdream/src/sh4/mmu.c
filename src/sh4/mmu.c
changeset 1090:71e28626b358
prev 1088:cf3900ae8acc
next 1173:49207ef698e1
author nkeynes
date Tue Dec 15 08:46:37 2009 +1000 (14 years ago)
permissions -rw-r--r--
last change Add side-by-side x86+sh4 disassembly output
Print SH4 state information and disassembly of the current block when
crashing.
Fix delay slot instruction in conditional branch not being marked as a
delay-slot instruction in the branch-not-taken path.
Rename REG_* defines in cpu.h to avoid conflict with translation defs
     1 /**
     2  * $Id$
     3  *
     4  * SH4 MMU implementation based on address space page maps. This module
     5  * is responsible for all address decoding functions. 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    19 #define MODULE sh4_module
    21 #include <stdio.h>
    22 #include <assert.h>
    23 #include "sh4/sh4mmio.h"
    24 #include "sh4/sh4core.h"
    25 #include "sh4/sh4trans.h"
    26 #include "dreamcast.h"
    27 #include "mem.h"
    28 #include "mmu.h"
    30 #define RAISE_TLB_ERROR(code, vpn) sh4_raise_tlb_exception(code, vpn)
    31 #define RAISE_MEM_ERROR(code, vpn) \
    32     MMIO_WRITE(MMU, TEA, vpn); \
    33     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    34     sh4_raise_exception(code);
    35 #define RAISE_TLB_MULTIHIT_ERROR(vpn) sh4_raise_tlb_multihit(vpn)
    37 /* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
    38 #define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
    40 /* Primary address space (used directly by SH4 cores) */
    41 mem_region_fn_t *sh4_address_space;
    42 mem_region_fn_t *sh4_user_address_space;
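       /* Each map is a flat table with one mem_region_fn_t pointer per 4K page of
        * the 32-bit address space; lookups throughout this file index it as
        * table[vaddr>>12]. sh4_user_address_space is the same map with user-mode
        * protection applied. */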
    44 /* Accessed from the UTLB accessor methods */
    45 uint32_t mmu_urc;
    46 uint32_t mmu_urb;
    47 static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */  
    49 /* Module globals */
    50 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
    51 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
    52 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
    53 static uint32_t mmu_lrui;
    54 static uint32_t mmu_asid; // current asid
    55 static struct utlb_default_regions *mmu_user_storequeue_regions;
    57 /* Structures for 1K page handling */
    58 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
    59 static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
    60 static int mmu_utlb_1k_free_index;
    63 /* Function prototypes */
    64 static void mmu_invalidate_tlb();
    65 static void mmu_utlb_register_all();
    66 static void mmu_utlb_remove_entry(int);
    67 static void mmu_utlb_insert_entry(int);
    68 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    69 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    70 static void mmu_set_tlb_enabled( int tlb_on );
    71 static void mmu_set_tlb_asid( uint32_t asid );
    72 static void mmu_set_storequeue_protected( int protected, int tlb_on );
    73 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
    74 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
    75 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
    76 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
    77 static void mmu_utlb_1k_init();
    78 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
    79 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
    80 static int mmu_read_urc();
    82 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
    83 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
    84 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
    85 static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc );
    86 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
    87 static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc );
    88 static uint32_t get_tlb_size_mask( uint32_t flags );
    89 static uint32_t get_tlb_size_pages( uint32_t flags );
    91 #define DEFAULT_REGIONS 0
    92 #define DEFAULT_STOREQUEUE_REGIONS 1
    93 #define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
    95 static struct utlb_default_regions mmu_default_regions[3] = {
    96         { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
    97         { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
    98         { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
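       /* Each row above supplies the default tlb_miss/tlb_prot/tlb_multihit handlers
        * (named after the utlb_default_regions fields used below) for pages with no
        * live UTLB mapping: ordinary memory, the store queue, and the store queue
        * with SQMD protection, respectively. */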
   100 #define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
   102 /*********************** Module public functions ****************************/
   104 /**
   105  * Allocate memory for the address space maps, and initialize them according
   106  * to the default (reset) values. (TLB is disabled by default)
   107  */
   109 void MMU_init()
   110 {
   111     sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   112     sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   113     mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   115     mmu_set_tlb_enabled(0);
   116     mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
   117     mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );                                
   119     /* Setup P4 tlb/cache access regions */
   120     mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   121     mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
   122     mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
   123     mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
   124     mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
   125     mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
   126     mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
   127     mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
   128     mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
   129     mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
   130     mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
   132     /* Setup P4 control region */
   133     mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
   134     mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
   135     mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
   136     mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
   137     mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
   138     mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
   139     mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
   140     mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
   141     mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
   142     mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
   143     mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
   144     mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
   145     mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
   147     register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
   148     mmu_utlb_1k_init();
   150     /* Ensure the code regions are executable (64-bit only). Although it might
   151      * be more portable to mmap these at runtime rather than using static decls
   152      */
   153 #if SIZEOF_VOID_P == 8
   154     mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
   155     mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
   156 #endif
   157 }
   159 void MMU_reset()
   160 {
   161     mmio_region_MMU_write( CCR, 0 );
   162     mmio_region_MMU_write( MMUCR, 0 );
   163 }
   165 void MMU_save_state( FILE *f )
   166 {
   167     mmu_read_urc();   
   168     fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
   169     fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
   170     fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
   171     fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
   172     fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
   173     fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
   174 }
   176 int MMU_load_state( FILE *f )
   177 {
   178     if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
   179         return 1;
   180     }
   181     if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
   182         return 1;
   183     }
   184     if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
   185         return 1;
   186     }
    187     if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
   188         return 1;
   189     }
   190     if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
   191         return 1;
   192     }
   193     if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
   194         return 1;
   195     }
   197     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
   198     mmu_urc_overflow = mmu_urc >= mmu_urb;
   199     mmu_set_tlb_enabled(mmucr&MMUCR_AT);
   200     mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
   201     return 0;
   202 }
   204 /**
   205  * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
   206  * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
   207  */
   208 void MMU_ldtlb()
   209 {
   210     int urc = mmu_read_urc();
   211     if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
   212         mmu_utlb_remove_entry( urc );
   213     mmu_utlb[urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
   214     mmu_utlb[urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
   215     mmu_utlb[urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
   216     mmu_utlb[urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
   217     mmu_utlb[urc].pcmcia = MMIO_READ(MMU, PTEA);
   218     mmu_utlb[urc].mask = get_tlb_size_mask(mmu_utlb[urc].flags);
   219     if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
   220         mmu_utlb_insert_entry( urc );
   221 }
   224 MMIO_REGION_READ_FN( MMU, reg )
   225 {
   226     reg &= 0xFFF;
   227     switch( reg ) {
   228     case MMUCR:
   229         return MMIO_READ( MMU, MMUCR) | (mmu_read_urc()<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
   230     default:
   231         return MMIO_READ( MMU, reg );
   232     }
   233 }
   235 MMIO_REGION_READ_DEFSUBFNS(MMU)
   237 MMIO_REGION_WRITE_FN( MMU, reg, val )
   238 {
   239     uint32_t tmp;
   240     reg &= 0xFFF;
   241     switch(reg) {
   242     case SH4VER:
   243         return;
   244     case PTEH:
   245         val &= 0xFFFFFCFF;
   246         if( (val & 0xFF) != mmu_asid ) {
   247             mmu_set_tlb_asid( val&0xFF );
   248         }
   249         break;
   250     case PTEL:
   251         val &= 0x1FFFFDFF;
   252         break;
   253     case PTEA:
   254         val &= 0x0000000F;
   255         break;
   256     case TRA:
   257         val &= 0x000003FC;
   258         break;
   259     case EXPEVT:
   260     case INTEVT:
   261         val &= 0x00000FFF;
   262         break;
   263     case MMUCR:
   264         if( val & MMUCR_TI ) {
   265             mmu_invalidate_tlb();
   266         }
   267         mmu_urc = (val >> 10) & 0x3F;
   268         mmu_urb = (val >> 18) & 0x3F;
   269         if( mmu_urb == 0 ) {
   270             mmu_urb = 0x40;
   271         } else if( mmu_urc >= mmu_urb ) {
   272             mmu_urc_overflow = TRUE;
   273         }
   274         mmu_lrui = (val >> 26) & 0x3F;
   275         val &= 0x00000301;
   276         tmp = MMIO_READ( MMU, MMUCR );
   277         if( (val ^ tmp) & (MMUCR_SQMD) ) {
   278             mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
   279         }
   280         if( (val ^ tmp) & (MMUCR_AT) ) {
   281             // AT flag has changed state - flush the xlt cache as all bets
   282             // are off now. We also need to force an immediate exit from the
   283             // current block
   284             mmu_set_tlb_enabled( val & MMUCR_AT );
   285             MMIO_WRITE( MMU, MMUCR, val );
   286             sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
   287             xlat_flush_cache(); // If we're not running, flush the cache anyway
   288         }
   289         break;
   290     case CCR:
   291         CCN_set_cache_control( val );
   292         val &= 0x81A7;
   293         break;
   294     case MMUUNK1:
   295         /* Note that if the high bit is set, this appears to reset the machine.
   296          * Not emulating this behaviour yet until we know why...
   297          */
   298         val &= 0x00010007;
   299         break;
   300     case QACR0:
   301     case QACR1:
   302         val &= 0x0000001C;
   303         break;
   304     case PMCR1:
   305         PMM_write_control(0, val);
   306         val &= 0x0000C13F;
   307         break;
   308     case PMCR2:
   309         PMM_write_control(1, val);
   310         val &= 0x0000C13F;
   311         break;
   312     default:
   313         break;
   314     }
   315     MMIO_WRITE( MMU, reg, val );
   316 }
   318 /********************** 1K Page handling ***********************/
   319 /* Since we use 4K pages as our native page size, 1K pages need a bit of extra
   320  * effort to manage - we justify this on the basis that most programs won't
   321  * actually use 1K pages, so we may as well optimize for the common case.
   322  * 
   323  * Implementation uses an intermediate page entry (the utlb_1k_entry) that
   324  * redirects requests to the 'real' page entry. These are allocated on an
   325  * as-needed basis, and returned to the pool when all subpages are empty.
   326  */ 
   327 static void mmu_utlb_1k_init()
   328 {
   329     int i;
   330     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   331         mmu_utlb_1k_free_list[i] = i;
   332         mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
   333     }
   334     mmu_utlb_1k_free_index = 0;
   335 }
   337 static struct utlb_1k_entry *mmu_utlb_1k_alloc()
   338 {
   339     assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
   340     struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
   341     return entry;
   342 }    
   344 static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
   345 {
   346     unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
   347     assert( entryNo < UTLB_ENTRY_COUNT );
   348     assert( mmu_utlb_1k_free_index > 0 );
   349     mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
   350 }
   353 /********************** Address space maintenance *************************/
   355 /**
   356  * MMU accessor functions just increment URC - fixup here if necessary
   357  */
   358 static int mmu_read_urc()
   359 {
   360     if( mmu_urc_overflow ) {
   361         if( mmu_urc >= 0x40 ) {
   362             mmu_urc_overflow = FALSE;
   363             mmu_urc -= 0x40;
   364             mmu_urc %= mmu_urb;
   365         }
   366     } else {
   367         mmu_urc %= mmu_urb;
   368     }
   369     return mmu_urc;
   370 }
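       /* Worked example of the fixup above: with URB=0x20, writing URC=0x30 sets
        * mmu_urc_overflow; the fast-path accessors keep incrementing the raw counter,
        * and once it reaches 0x40 the read subtracts 0x40 and reduces it modulo URB,
        * bringing the visible value back into range. */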
   372 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   373 {
   374     int count = (end - start) >> 12;
   375     mem_region_fn_t *ptr = &sh4_address_space[start>>12];
   376     while( count-- > 0 ) {
   377         *ptr++ = fn;
   378     }
   379 }
   380 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   381 {
   382     int count = (end - start) >> 12;
   383     mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
   384     while( count-- > 0 ) {
   385         *ptr++ = fn;
   386     }
   387 }
   389 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
   390 {
   391     unsigned int i;
   392     if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
   393         /* TLB on */
   394         sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
   395         sh4_address_space[(page|0xA0000000)>>12] = fn;
   396         /* Scan UTLB and update any direct-referencing entries */
   397     } else {
   398         /* Direct map to U0, P0, P1, P2, P3 */
   399         for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
   400             sh4_address_space[(page|i)>>12] = fn;
   401         }
   402         for( i=0; i < 0x80000000; i+= 0x20000000 ) {
   403             sh4_user_address_space[(page|i)>>12] = fn;
   404         }
   405     }
   406     return TRUE;
   407 }
   409 static void mmu_set_tlb_enabled( int tlb_on )
   410 {
   411     mem_region_fn_t *ptr, *uptr;
   412     int i;
   414     /* Reset the storequeue area */
   416     if( tlb_on ) {
   417         mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   418         mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
   419         mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   421         /* Default SQ prefetch goes to TLB miss (?) */
   422         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
   423         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   424         mmu_utlb_register_all();
   425     } else {
   426         for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   427             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   428         }
   429         for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   430             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   431         }
   433         mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   434         if( IS_STOREQUEUE_PROTECTED() ) {
   435             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
   436         } else {
   437             mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   438         }
   439     }
   441 }
   443 /**
   444  * Flip the SQMD switch - this is rather expensive, so will need to be changed if
   445  * anything expects to do this frequently.
   446  */
   447 static void mmu_set_storequeue_protected( int protected, int tlb_on ) 
   448 {
   449     mem_region_fn_t nontlb_region;
   450     int i;
   452     if( protected ) {
   453         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
   454         nontlb_region = &p4_region_storequeue_sqmd;
   455     } else {
   456         mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   457         nontlb_region = &p4_region_storequeue; 
   458     }
   460     if( tlb_on ) {
   461         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
   462         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   463             if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
   464                 mmu_utlb_insert_entry(i);
   465             }
   466         }
   467     } else {
   468         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region ); 
   469     }
   471 }
   473 static void mmu_set_tlb_asid( uint32_t asid )
   474 {
   475     if( IS_TLB_ENABLED() ) {
   476         /* Scan for pages that need to be remapped */
   477         int i;
   478         if( IS_SV_ENABLED() ) {
   479             for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   480                 if( mmu_utlb[i].asid == mmu_asid &&
   481                         (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   482                     // Matches old ASID - unmap out
   483                     if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   484                             get_tlb_size_pages(mmu_utlb[i].flags) ) )
   485                         mmu_utlb_remap_pages( FALSE, TRUE, i );
   486                 }
   487             }
   488             for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   489                 if( mmu_utlb[i].asid == asid &&
   490                         (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   491                     // Matches new ASID - map in
   492                     mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
   493                             mmu_utlb[i].vpn&mmu_utlb[i].mask,
   494                             get_tlb_size_pages(mmu_utlb[i].flags) );
   495                 }
   496             }
   497         } else {
   498             // Remap both Priv+user pages
   499             for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   500                 if( mmu_utlb[i].asid == mmu_asid &&
   501                         (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   502                     if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   503                             get_tlb_size_pages(mmu_utlb[i].flags) ) )
   504                         mmu_utlb_remap_pages( TRUE, TRUE, i );
   505                 }
   506             }
   507             for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   508                 if( mmu_utlb[i].asid == asid &&
   509                         (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
   510                     mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
   511                             mmu_utlb[i].vpn&mmu_utlb[i].mask,
   512                             get_tlb_size_pages(mmu_utlb[i].flags) );
   513                 }
   514             }
   515         }
   516         sh4_icache.page_vma = -1; // invalidate icache as asid has changed
   517     }
   518     mmu_asid = asid;
   519 }
   521 static uint32_t get_tlb_size_mask( uint32_t flags )
   522 {
   523     switch( flags & TLB_SIZE_MASK ) {
   524     case TLB_SIZE_1K: return MASK_1K;
   525     case TLB_SIZE_4K: return MASK_4K;
   526     case TLB_SIZE_64K: return MASK_64K;
   527     case TLB_SIZE_1M: return MASK_1M;
   528     default: return 0; /* Unreachable */
   529     }
   530 }
   531 static uint32_t get_tlb_size_pages( uint32_t flags )
   532 {
   533     switch( flags & TLB_SIZE_MASK ) {
   534     case TLB_SIZE_1K: return 0;
   535     case TLB_SIZE_4K: return 1;
   536     case TLB_SIZE_64K: return 16;
   537     case TLB_SIZE_1M: return 256;
   538     default: return 0; /* Unreachable */
   539     }
   540 }
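       /* Sizes are in units of the emulator's native 4K pages: a 1M entry covers 256
        * pages, 64K covers 16, 4K covers 1, and a return of 0 marks a 1K entry, which
        * is handled via the utlb_1k_entry sub-page tables rather than the main map. */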
   542 /**
   543  * Add a new TLB entry mapping to the address space table. If any of the pages
   544  * are already mapped, they are mapped to the TLB multi-hit page instead.
   545  * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
   546  */ 
   547 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
   548 {
   549     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   550     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   551     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   552     struct utlb_default_regions *userdefs = privdefs;    
   554     gboolean mapping_ok = TRUE;
   555     int i;
   557     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   558         /* Storequeue mapping */
   559         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   560         userdefs = mmu_user_storequeue_regions;
   561     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   562         user_page = NULL; /* No user access to P3 region */
   563     } else if( start_addr >= 0x80000000 ) {
   564         return TRUE; // No mapping - legal but meaningless
   565     }
   567     if( npages == 0 ) {
   568         struct utlb_1k_entry *ent;
   569         int i, idx = (start_addr >> 10) & 0x03;
   570         if( IS_1K_PAGE_ENTRY(*ptr) ) {
   571             ent = (struct utlb_1k_entry *)*ptr;
   572         } else {
   573             ent = mmu_utlb_1k_alloc();
   574             /* New 1K struct - init to previous contents of region */
   575             for( i=0; i<4; i++ ) {
   576                 ent->subpages[i] = *ptr;
   577                 ent->user_subpages[i] = *uptr;
   578             }
   579             *ptr = &ent->fn;
   580             *uptr = &ent->user_fn;
   581         }
   583         if( priv_page != NULL ) {
   584             if( ent->subpages[idx] == privdefs->tlb_miss ) {
   585                 ent->subpages[idx] = priv_page;
   586             } else {
   587                 mapping_ok = FALSE;
   588                 ent->subpages[idx] = privdefs->tlb_multihit;
   589             }
   590         }
   591         if( user_page != NULL ) {
   592             if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
   593                 ent->user_subpages[idx] = user_page;
   594             } else {
   595                 mapping_ok = FALSE;
   596                 ent->user_subpages[idx] = userdefs->tlb_multihit;
   597             }
   598         }
   600     } else {
   601         if( priv_page != NULL ) {
   602             /* Privileged mapping only */
   603             for( i=0; i<npages; i++ ) {
   604                 if( *ptr == privdefs->tlb_miss ) {
   605                     *ptr++ = priv_page;
   606                 } else {
   607                     mapping_ok = FALSE;
   608                     *ptr++ = privdefs->tlb_multihit;
   609                 }
   610             }
   611         }
   612         if( user_page != NULL ) {
   613             /* User mapping only (eg ASID change remap w/ SV=1) */
   614             for( i=0; i<npages; i++ ) {
   615                 if( *uptr == userdefs->tlb_miss ) {
   616                     *uptr++ = user_page;
   617                 } else {
   618                     mapping_ok = FALSE;
   619                     *uptr++ = userdefs->tlb_multihit;
   620                 }
   621             }        
   622         }
   623     }
   625     return mapping_ok;
   626 }
   628 /**
   629  * Remap any pages within the region covered by entryNo, but not including 
   630  * entryNo itself. This is used to reestablish pages that were previously
   631  * covered by a multi-hit exception region when one of the pages is removed.
   632  */
   633 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
   634 {
   635     int mask = mmu_utlb[entryNo].mask;
   636     uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
   637     int i;
   639     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   640         if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
   641             /* Overlapping region */
   642             mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
    643             mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
   644             uint32_t start_addr;
   645             int npages;
   647             if( mmu_utlb[i].mask >= mask ) {
   648                 /* entry is no larger than the area we're replacing - map completely */
   649                 start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
   650                 npages = get_tlb_size_pages( mmu_utlb[i].flags );
   651             } else {
   652                 /* Otherwise map subset - region covered by removed page */
   653                 start_addr = remap_addr;
   654                 npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
   655             }
   657             if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) { 
   658                 mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
   659             } else if( IS_SV_ENABLED() ) {
   660                 mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
   661             }
   663         }
   664     }
   665 }
   667 /**
   668  * Remove a previous TLB mapping (replacing them with the TLB miss region).
   669  * @return FALSE if any pages were previously mapped to the TLB multihit page, 
   670  * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
   671  */
   672 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
   673 {
   674     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   675     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   676     struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
   677     struct utlb_default_regions *userdefs = privdefs;
   679     gboolean unmapping_ok = TRUE;
   680     int i;
   682     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   683         /* Storequeue mapping */
   684         privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
   685         userdefs = mmu_user_storequeue_regions;
   686     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   687         unmap_user = FALSE;
   688     } else if( start_addr >= 0x80000000 ) {
   689         return TRUE; // No mapping - legal but meaningless
   690     }
   692     if( npages == 0 ) { // 1K page
   693         assert( IS_1K_PAGE_ENTRY( *ptr ) );
   694         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
   695         int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
   696         if( ent->subpages[idx] == privdefs->tlb_multihit ) {
   697             unmapping_ok = FALSE;
   698         }
   699         if( unmap_priv )
   700             ent->subpages[idx] = privdefs->tlb_miss;
   701         if( unmap_user )
   702             ent->user_subpages[idx] = userdefs->tlb_miss;
   704         /* If all 4 subpages have the same content, merge them together and
   705          * release the 1K entry
   706          */
   707         mem_region_fn_t priv_page = ent->subpages[0];
   708         mem_region_fn_t user_page = ent->user_subpages[0];
   709         for( i=1; i<4; i++ ) {
   710             if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
   711                 mergeable = 0;
   712                 break;
   713             }
   714         }
   715         if( mergeable ) {
   716             mmu_utlb_1k_free(ent);
   717             *ptr = priv_page;
   718             *uptr = user_page;
   719         }
   720     } else {
   721         if( unmap_priv ) {
   722             /* Privileged (un)mapping */
   723             for( i=0; i<npages; i++ ) {
   724                 if( *ptr == privdefs->tlb_multihit ) {
   725                     unmapping_ok = FALSE;
   726                 }
   727                 *ptr++ = privdefs->tlb_miss;
   728             }
   729         }
   730         if( unmap_user ) {
   731             /* User (un)mapping */
   732             for( i=0; i<npages; i++ ) {
   733                 if( *uptr == userdefs->tlb_multihit ) {
   734                     unmapping_ok = FALSE;
   735                 }
   736                 *uptr++ = userdefs->tlb_miss;
   737             }            
   738         }
   739     }
   741     return unmapping_ok;
   742 }
   744 static void mmu_utlb_insert_entry( int entry )
   745 {
   746     struct utlb_entry *ent = &mmu_utlb[entry];
   747     mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
   748     mem_region_fn_t upage;
   749     sh4addr_t start_addr = ent->vpn & ent->mask;
   750     int npages = get_tlb_size_pages(ent->flags);
   752     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   753         /* Store queue mappings are a bit different - normal access is fixed to
   754          * the store queue register block, and we only map prefetches through
   755          * the TLB 
   756          */
   757         mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
   759         if( (ent->flags & TLB_USERMODE) == 0 ) {
   760             upage = mmu_user_storequeue_regions->tlb_prot;
   761         } else if( IS_STOREQUEUE_PROTECTED() ) {
   762             upage = &p4_region_storequeue_sqmd;
   763         } else {
   764             upage = page;
   765         }
   767     }  else {
   769         if( (ent->flags & TLB_USERMODE) == 0 ) {
   770             upage = &mem_region_tlb_protected;
   771         } else {        
   772             upage = page;
   773         }
   775         if( (ent->flags & TLB_WRITABLE) == 0 ) {
   776             page->write_long = (mem_write_fn_t)tlb_protected_write;
   777             page->write_word = (mem_write_fn_t)tlb_protected_write;
   778             page->write_byte = (mem_write_fn_t)tlb_protected_write;
   779             page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
   780             page->read_byte_for_write = (mem_read_fn_t)tlb_protected_read_for_write;
   781             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   782         } else if( (ent->flags & TLB_DIRTY) == 0 ) {
   783             page->write_long = (mem_write_fn_t)tlb_initial_write;
   784             page->write_word = (mem_write_fn_t)tlb_initial_write;
   785             page->write_byte = (mem_write_fn_t)tlb_initial_write;
   786             page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
   787             page->read_byte_for_write = (mem_read_fn_t)tlb_initial_read_for_write;
   788             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   789         } else {
   790             mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
   791         }
   792     }
   794     mmu_utlb_pages[entry].user_fn = upage;
   796     /* Is page visible? */
   797     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) { 
   798         mmu_utlb_map_pages( page, upage, start_addr, npages );
   799     } else if( IS_SV_ENABLED() ) {
   800         mmu_utlb_map_pages( page, NULL, start_addr, npages );
   801     }
   802 }
   804 static void mmu_utlb_remove_entry( int entry )
   805 {
   806     int i, j;
   807     struct utlb_entry *ent = &mmu_utlb[entry];
   808     sh4addr_t start_addr = ent->vpn&ent->mask;
   809     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   810     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   811     gboolean unmap_user;
   812     int npages = get_tlb_size_pages(ent->flags);
   814     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
   815         unmap_user = TRUE;
   816     } else if( IS_SV_ENABLED() ) {
   817         unmap_user = FALSE;
   818     } else {
   819         return; // Not mapped
   820     }
   822     gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
   824     if( !clean_unmap ) {
   825         mmu_utlb_remap_pages( TRUE, unmap_user, entry );
   826     }
   827 }
   829 static void mmu_utlb_register_all()
   830 {
   831     int i;
   832     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   833         if( mmu_utlb[i].flags & TLB_VALID ) 
   834             mmu_utlb_insert_entry( i );
   835     }
   836 }
   838 static void mmu_invalidate_tlb()
   839 {
   840     int i;
   841     for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
   842         mmu_itlb[i].flags &= (~TLB_VALID);
   843     }
   844     if( IS_TLB_ENABLED() ) {
   845         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   846             if( mmu_utlb[i].flags & TLB_VALID ) {
   847                 mmu_utlb_remove_entry( i );
   848             }
   849         }
   850     }
   851     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   852         mmu_utlb[i].flags &= (~TLB_VALID);
   853     }
   854 }
   856 /******************************************************************************/
   857 /*                        MMU TLB address translation                         */
   858 /******************************************************************************/
   860 /**
   861  * Translate a 32-bit address into a UTLB entry number. Does not check for
   862  * page protection etc.
   863  * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
   864  */
   865 int mmu_utlb_entry_for_vpn( uint32_t vpn )
   866 {
   867     mmu_urc++;
   868     mem_region_fn_t fn = sh4_address_space[vpn>>12];
   869     if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   870         return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   871     } else if( fn >= &mmu_utlb_1k_pages[0].fn && fn < &mmu_utlb_1k_pages[UTLB_ENTRY_COUNT].fn ) {
   872         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)fn;
   873         fn = ent->subpages[(vpn>>10)&0x03];
   874         if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   875             return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   876         }            
   877     } 
   878     if( fn == &mem_region_tlb_multihit ) {
   879         return -2;
   880     } else {
   881         return -1;
   882     }
   883 }
   886 /**
   887  * Perform the actual utlb lookup w/ asid matching.
    888  * Possible outcomes are:
   889  *   0..63 Single match - good, return entry found
   890  *   -1 No match - raise a tlb data miss exception
   891  *   -2 Multiple matches - raise a multi-hit exception (reset)
   892  * @param vpn virtual address to resolve
   893  * @return the resultant UTLB entry, or an error.
   894  */
   895 static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
   896 {
   897     int result = -1;
   898     unsigned int i;
   900     mmu_urc++;
   901     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   902         mmu_urc = 0;
   903     }
   905     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   906         if( (mmu_utlb[i].flags & TLB_VALID) &&
   907                 ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
   908                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   909             if( result != -1 ) {
   910                 return -2;
   911             }
   912             result = i;
   913         }
   914     }
   915     return result;
   916 }
   918 /**
   919  * Perform the actual utlb lookup matching on vpn only
    920  * Possible outcomes are:
   921  *   0..63 Single match - good, return entry found
   922  *   -1 No match - raise a tlb data miss exception
   923  *   -2 Multiple matches - raise a multi-hit exception (reset)
   924  * @param vpn virtual address to resolve
   925  * @return the resultant UTLB entry, or an error.
   926  */
   927 static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
   928 {
   929     int result = -1;
   930     unsigned int i;
   932     mmu_urc++;
   933     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   934         mmu_urc = 0;
   935     }
   937     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   938         if( (mmu_utlb[i].flags & TLB_VALID) &&
   939                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   940             if( result != -1 ) {
   941                 return -2;
   942             }
   943             result = i;
   944         }
   945     }
   947     return result;
   948 }
   950 /**
   951  * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
   952  * @return the number (0-3) of the replaced entry.
   953  */
   954 static int inline mmu_itlb_update_from_utlb( int entryNo )
   955 {
   956     int replace;
   957     /* Determine entry to replace based on lrui */
   958     if( (mmu_lrui & 0x38) == 0x38 ) {
   959         replace = 0;
   960         mmu_lrui = mmu_lrui & 0x07;
   961     } else if( (mmu_lrui & 0x26) == 0x06 ) {
   962         replace = 1;
   963         mmu_lrui = (mmu_lrui & 0x19) | 0x20;
   964     } else if( (mmu_lrui & 0x15) == 0x01 ) {
   965         replace = 2;
   966         mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
   967     } else { // Note - gets invalid entries too
   968         replace = 3;
   969         mmu_lrui = (mmu_lrui | 0x0B);
   970     }
   972     mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
   973     mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
   974     mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
   975     mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
   976     mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
   977     return replace;
   978 }
   980 /**
   981  * Perform the actual itlb lookup w/ asid protection
    982  * Possible outcomes are:
    983  *   0..3  Single match - good, return entry found
   984  *   -1 No match - raise a tlb data miss exception
   985  *   -2 Multiple matches - raise a multi-hit exception (reset)
   986  * @param vpn virtual address to resolve
   987  * @return the resultant ITLB entry, or an error.
   988  */
   989 static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
   990 {
   991     int result = -1;
   992     unsigned int i;
   994     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   995         if( (mmu_itlb[i].flags & TLB_VALID) &&
   996                 ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
   997                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   998             if( result != -1 ) {
   999                 return -2;
  1000             }
  1001             result = i;
  1002         }
  1003     }
  1005     if( result == -1 ) {
  1006         int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
  1007         if( utlbEntry < 0 ) {
  1008             return utlbEntry;
  1009         } else {
  1010             return mmu_itlb_update_from_utlb( utlbEntry );
  1011         }
  1012     }
  1014     switch( result ) {
  1015     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
  1016     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
  1017     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
  1018     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
  1019     }
  1021     return result;
  1022 }
  1024 /**
  1025  * Perform the actual itlb lookup on vpn only
  1026  * Possible outcomes are:
  1027  *   0..3  Single match - good, return entry found
  1028  *   -1 No match - raise a tlb data miss exception
  1029  *   -2 Multiple matches - raise a multi-hit exception (reset)
  1030  * @param vpn virtual address to resolve
  1031  * @return the resultant ITLB entry, or an error.
  1032  */
  1033 static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
  1034 {
  1035     int result = -1;
  1036     unsigned int i;
  1038     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1039         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1040                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1041             if( result != -1 ) {
  1042                 return -2;
  1043             }
  1044             result = i;
  1045         }
  1046     }
  1048     if( result == -1 ) {
  1049         int utlbEntry = mmu_utlb_lookup_vpn( vpn );
  1050         if( utlbEntry < 0 ) {
  1051             return utlbEntry;
  1052         } else {
  1053             return mmu_itlb_update_from_utlb( utlbEntry );
  1054         }
  1055     }
  1057     switch( result ) {
  1058     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
  1059     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
  1060     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
  1061     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
  1062     }
  1064     return result;
  1065 }
  1067 /**
  1068  * Update the icache for an untranslated address
  1069  */
  1070 static inline void mmu_update_icache_phys( sh4addr_t addr )
  1071 {
  1072     if( (addr & 0x1C000000) == 0x0C000000 ) {
  1073         /* Main ram */
  1074         sh4_icache.page_vma = addr & 0xFF000000;
  1075         sh4_icache.page_ppa = 0x0C000000;
  1076         sh4_icache.mask = 0xFF000000;
  1077         sh4_icache.page = dc_main_ram;
  1078     } else if( (addr & 0x1FE00000) == 0 ) {
  1079         /* BIOS ROM */
  1080         sh4_icache.page_vma = addr & 0xFFE00000;
  1081         sh4_icache.page_ppa = 0;
  1082         sh4_icache.mask = 0xFFE00000;
  1083         sh4_icache.page = dc_boot_rom;
  1084     } else {
  1085         /* not supported */
  1086         sh4_icache.page_vma = -1;
  1087     }
  1088 }
  1090 /**
  1091  * Update the sh4_icache structure to describe the page(s) containing the
  1092  * given vma. If the address does not reference a RAM/ROM region, the icache
  1093  * will be invalidated instead.
  1094  * If AT is on, this method will raise TLB exceptions normally
  1095  * (hence this method should only be used immediately prior to execution of
  1096  * code), and otherwise will set the icache according to the matching TLB entry.
  1097  * If AT is off, this method will set the entire referenced RAM/ROM region in
  1098  * the icache.
  1099  * @return TRUE if the update completed (successfully or otherwise), FALSE
  1100  * if an exception was raised.
  1101  */
  1102 gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
  1103 {
  1104     int entryNo;
  1105     if( IS_SH4_PRIVMODE()  ) {
  1106         if( addr & 0x80000000 ) {
  1107             if( addr < 0xC0000000 ) {
  1108                 /* P1, P2 and P4 regions are pass-through (no translation) */
  1109                 mmu_update_icache_phys(addr);
  1110                 return TRUE;
  1111             } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
  1112                 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1113                 return FALSE;
  1114             }
  1115         }
  1117         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1118         if( (mmucr & MMUCR_AT) == 0 ) {
  1119             mmu_update_icache_phys(addr);
  1120             return TRUE;
  1121         }
  1123         if( (mmucr & MMUCR_SV) == 0 )
  1124         	entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1125         else
  1126         	entryNo = mmu_itlb_lookup_vpn( addr );
  1127     } else {
  1128         if( addr & 0x80000000 ) {
  1129             RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1130             return FALSE;
  1131         }
  1133         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1134         if( (mmucr & MMUCR_AT) == 0 ) {
  1135             mmu_update_icache_phys(addr);
  1136             return TRUE;
  1137         }
  1139         entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1141         if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
  1142             RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1143             return FALSE;
  1144         }
  1145     }
  1147     switch(entryNo) {
  1148     case -1:
  1149     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1150     return FALSE;
  1151     case -2:
  1152     RAISE_TLB_MULTIHIT_ERROR(addr);
  1153     return FALSE;
  1154     default:
  1155         sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
  1156         sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
  1157         if( sh4_icache.page == NULL ) {
  1158             sh4_icache.page_vma = -1;
  1159         } else {
  1160             sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
  1161             sh4_icache.mask = mmu_itlb[entryNo].mask;
  1163         return TRUE;
  1164     }
  1165 }
  1167 /**
  1168  * Translate address for disassembly purposes (ie performs an instruction
  1169  * lookup) - does not raise exceptions or modify any state, and ignores
  1170  * protection bits. Returns the translated address, or MMU_VMA_ERROR
  1171  * on translation failure.
  1172  */
  1173 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
  1174 {
  1175     if( vma & 0x80000000 ) {
  1176         if( vma < 0xC0000000 ) {
  1177             /* P1, P2 and P4 regions are pass-through (no translation) */
  1178             return VMA_TO_EXT_ADDR(vma);
  1179         } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
  1180             /* Not translatable */
  1181             return MMU_VMA_ERROR;
  1182         }
  1183     }
  1185     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1186     if( (mmucr & MMUCR_AT) == 0 ) {
  1187         return VMA_TO_EXT_ADDR(vma);
  1188     }
  1190     int entryNo = mmu_itlb_lookup_vpn( vma );
  1191     if( entryNo == -2 ) {
  1192         entryNo = mmu_itlb_lookup_vpn_asid( vma );
  1193     }
  1194     if( entryNo < 0 ) {
  1195         return MMU_VMA_ERROR;
  1196     } else {
  1197         return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
  1198         (vma & (~mmu_itlb[entryNo].mask));
  1199     }
  1200 }
  1202 /********************** TLB Direct-Access Regions ***************************/
  1203 #ifdef HAVE_FRAME_ADDRESS
  1204 #define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; } while(0)
  1205 #else
  1206 #define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
  1207 #endif
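       /* With HAVE_FRAME_ADDRESS, EXCEPTION_EXIT() appears to overwrite the saved
        * return address of the current FASTCALL frame with the 'exc' continuation
        * passed in by translated code, so returning from the accessor resumes at the
        * exception epilogue instead of after the faulting access; otherwise we fall
        * back to leaving the core loop with CORE_EXIT_EXCEPTION. */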
  1210 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
  1212 int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
  1213 {
  1214     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1215     return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
  1216 }
  1218 void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
  1219 {
  1220     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1221     ent->vpn = val & 0xFFFFFC00;
  1222     ent->asid = val & 0x000000FF;
  1223     ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
  1224 }
  1226 int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
  1227 {
  1228     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1229     return (ent->ppn & 0x1FFFFC00) | ent->flags;
  1230 }
  1232 void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
  1233 {
  1234     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
  1235     ent->ppn = val & 0x1FFFFC00;
  1236     ent->flags = val & 0x00001DA;
  1237     ent->mask = get_tlb_size_mask(val);
  1238     if( ent->ppn >= 0x1C000000 )
  1239         ent->ppn |= 0xE0000000;
  1240 }
  1242 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
  1243 #define UTLB_ASSOC(addr) (addr&0x80)
  1244 #define UTLB_DATA2(addr) (addr&0x00800000)
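       /* P4 UTLB array decoding per the macros above: bits 13..8 select the entry,
        * bit 7 requests an associative (lookup-based) write, and bit 23 selects the
        * second data array holding the PCMCIA assistance bits. */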
  1246 int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
  1247 {
  1248     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1249     return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
  1250     ((ent->flags & TLB_DIRTY)<<7);
  1251 }
  1252 int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
  1253 {
  1254     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1255     if( UTLB_DATA2(addr) ) {
  1256         return ent->pcmcia;
  1257     } else {
  1258         return (ent->ppn&0x1FFFFC00) | ent->flags;
  1259     }
  1260 }
  1262 /**
  1263  * Find a UTLB entry for the associative TLB write - same as the normal
  1264  * lookup but ignores the valid bit.
  1265  */
  1266 static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
  1267 {
  1268     int result = -1;
  1269     unsigned int i;
  1270     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
  1271         if( (mmu_utlb[i].flags & TLB_VALID) &&
  1272                 ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
  1273                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
  1274             if( result != -1 ) {
  1275                 fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
  1276                 return -2;
  1277             }
  1278             result = i;
  1279         }
  1280     }
  1281     return result;
  1282 }
  1284 /**
  1285  * Find an ITLB entry for the associative TLB write - same as the normal
  1286  * lookup but ignores the valid bit.
  1287  */
  1288 static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
  1289 {
  1290     int result = -1;
  1291     unsigned int i;
  1292     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
  1293         if( (mmu_itlb[i].flags & TLB_VALID) &&
  1294                 ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
  1295                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
  1296             if( result != -1 ) {
  1297                 return -2;
  1298             }
  1299             result = i;
  1300         }
  1301     }
  1302     return result;
  1303 }
  1305 void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
  1306 {
  1307     if( UTLB_ASSOC(addr) ) {
  1308         int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
  1309         if( utlb >= 0 ) {
  1310             struct utlb_entry *ent = &mmu_utlb[utlb];
  1311             uint32_t old_flags = ent->flags;
  1312             ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
  1313             ent->flags |= (val & TLB_VALID);
  1314             ent->flags |= ((val & 0x200)>>7);
  1315             if( IS_TLB_ENABLED() && ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
  1316                 if( old_flags & TLB_VALID )
  1317                     mmu_utlb_remove_entry( utlb );
  1318                 if( ent->flags & TLB_VALID )
  1319                     mmu_utlb_insert_entry( utlb );
  1320             }
  1321         }
  1323         int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
  1324         if( itlb >= 0 ) {
  1325             struct itlb_entry *ent = &mmu_itlb[itlb];
  1326             ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
  1327         }
  1329         if( itlb == -2 || utlb == -2 ) {
  1330             RAISE_TLB_MULTIHIT_ERROR(addr); /* FIXME: should this only be raised if TLB is enabled? */
  1331             EXCEPTION_EXIT();
  1332             return;
  1333         }
  1334     } else {
  1335         struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1336         if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
  1337             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1338         ent->vpn = (val & 0xFFFFFC00);
  1339         ent->asid = (val & 0xFF);
  1340         ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
  1341         ent->flags |= (val & TLB_VALID);
  1342         ent->flags |= ((val & 0x200)>>7);
  1343         if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
  1344             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
  1345     }
  1346 }
  1348 void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
  1349 {
  1350     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1351     if( UTLB_DATA2(addr) ) {
  1352         ent->pcmcia = val & 0x0000000F;
  1353     } else {
  1354         if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
  1355             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1356         ent->ppn = (val & 0x1FFFFC00);
  1357         ent->flags = (val & 0x000001FF);
  1358         ent->mask = get_tlb_size_mask(val);
  1359         if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
  1360             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
  1361     }
  1362 }
  1364 struct mem_region_fn p4_region_itlb_addr = {
  1365         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1366         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1367         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1368         unmapped_read_burst, unmapped_write_burst,
  1369         unmapped_prefetch, mmu_itlb_addr_read };
  1370 struct mem_region_fn p4_region_itlb_data = {
  1371         mmu_itlb_data_read, mmu_itlb_data_write,
  1372         mmu_itlb_data_read, mmu_itlb_data_write,
  1373         mmu_itlb_data_read, mmu_itlb_data_write,
  1374         unmapped_read_burst, unmapped_write_burst,
  1375         unmapped_prefetch, mmu_itlb_data_read };
  1376 struct mem_region_fn p4_region_utlb_addr = {
  1377         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1378         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1379         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1380         unmapped_read_burst, unmapped_write_burst,
  1381         unmapped_prefetch, mmu_utlb_addr_read };
  1382 struct mem_region_fn p4_region_utlb_data = {
  1383         mmu_utlb_data_read, mmu_utlb_data_write,
  1384         mmu_utlb_data_read, mmu_utlb_data_write,
  1385         mmu_utlb_data_read, mmu_utlb_data_write,
  1386         unmapped_read_burst, unmapped_write_burst,
  1387         unmapped_prefetch, mmu_utlb_data_read };
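/* Editor's note (not part of the original file): each initializer above fills
 * the struct mem_region_fn slots in declaration order (per mem.h) - long,
 * word and byte read/write pairs, then burst read/write, then the prefetch
 * and read-for-write hooks - so the ITLB/UTLB array regions respond the same
 * way to byte, word and long accesses while burst accesses fall through to
 * the unmapped handlers. */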
  1389 /********************** Error regions **************************/
  1391 static void FASTCALL address_error_read( sh4addr_t addr, void *exc ) 
  1392 {
  1393     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1394     EXCEPTION_EXIT();
  1395 }
  1397 static void FASTCALL address_error_read_for_write( sh4addr_t addr, void *exc ) 
  1398 {
  1399     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
  1400     EXCEPTION_EXIT();
  1401 }
  1403 static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc ) 
  1404 {
  1405     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
  1406     EXCEPTION_EXIT();
  1407 }
  1409 static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
  1410 {
  1411     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
  1412     EXCEPTION_EXIT();
  1413 }
  1415 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
  1416 {
  1417     mmu_urc++;
  1418     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1419     EXCEPTION_EXIT();
  1420 }
  1422 static void FASTCALL tlb_miss_read_for_write( sh4addr_t addr, void *exc )
  1423 {
  1424     mmu_urc++;
  1425     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
  1426     EXCEPTION_EXIT();
  1427 }
  1429 static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1430 {
  1431     mmu_urc++;
  1432     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1433     EXCEPTION_EXIT();
  1434 }
  1436 static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
  1437 {
  1438     mmu_urc++;
  1439     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
  1440     EXCEPTION_EXIT();
  1441 }
  1443 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
  1444 {
  1445     mmu_urc++;
  1446     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1447     EXCEPTION_EXIT();
  1448     return 0; 
  1449 }
  1451 static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc )
  1452 {
  1453     mmu_urc++;
  1454     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
  1455     EXCEPTION_EXIT();
  1456     return 0;
  1457 }
  1459 static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1460 {
  1461     mmu_urc++;
  1462     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
  1463     EXCEPTION_EXIT();
  1464     return 0;
  1465 }
  1467 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
  1468 {
  1469     mmu_urc++;
  1470     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
  1471     EXCEPTION_EXIT();
  1472 }
  1474 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
  1475 {
  1476     mmu_urc++;
  1477     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
  1478     EXCEPTION_EXIT();
  1479 }
  1481 static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc )
  1482 {
  1483     mmu_urc++;
  1484     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
  1485     EXCEPTION_EXIT();
  1486     return 0;
  1487 }
  1489 static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
  1490 {
  1491     sh4_raise_tlb_multihit(addr);
  1492     EXCEPTION_EXIT();
  1493     return 0; 
  1494 }
  1496 static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
  1497 {
  1498     sh4_raise_tlb_multihit(addr);
  1499     EXCEPTION_EXIT();
  1500     return 0; 
  1501 }
  1503 static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
  1504 {
  1505     sh4_raise_tlb_multihit(addr);
  1506     EXCEPTION_EXIT();
  1507 }
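/* Editor's note (not part of the original file): two conventions shared by the
 * handlers above.  The *_read_for_write variants fill the final mem_region_fn
 * slot and raise the write-class exception even though they service a read,
 * presumably so the read half of a read-modify-write access reports the fault
 * the write would have taken.  The mmu_urc++ in the miss/protection handlers
 * mirrors the hardware MMUCR.URC counter, which advances on each UTLB access
 * regardless of the outcome. */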
  1508 /**
  1509  * Note: Per sec 4.6.4 of the SH7750 manual, SQ 
  1510  */
  1511 struct mem_region_fn mem_region_address_error = {
  1512         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1513         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1514         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1515         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1516         unmapped_prefetch, (mem_read_fn_t)address_error_read_for_write };
  1518 struct mem_region_fn mem_region_tlb_miss = {
  1519         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1520         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1521         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1522         (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
  1523         unmapped_prefetch, (mem_read_fn_t)tlb_miss_read_for_write };
  1525 struct mem_region_fn mem_region_tlb_protected = {
  1526         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1527         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1528         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1529         (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
  1530         unmapped_prefetch, (mem_read_fn_t)tlb_protected_read_for_write };
  1532 struct mem_region_fn mem_region_tlb_multihit = {
  1533         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1534         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1535         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1536         (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
  1537         (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)tlb_multi_hit_read };
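/* Editor's note (illustrative, not part of the original file): these four
 * tables are installed into the sh4_address_space / sh4_user_address_space
 * page maps wherever the current translation state calls for it (no mapping,
 * protection violation, multiple hits, or an illegal user-mode access), so a
 * faulting access dispatches straight to the matching handler.  Roughly, with
 * approximate slot and shift names rather than code from this module:
 *
 *     mem_region_fn_t fn = sh4_address_space[addr >> LXDREAM_PAGE_BITS];
 *     int32_t value = fn->read_long( addr );   // e.g. raises EXC_TLB_MISS_READ
 */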
  1540 /* Store-queue regions */
  1541 /* These are a bit of a pain - the first 8 fields are controlled by SQMD, while 
  1542  * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
  1543  * some cases), in contrast to the ordinary fields above.
  1545  * There is probably a simpler way to do this.
  1546  */
  1548 struct mem_region_fn p4_region_storequeue = { 
  1549         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1550         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1551         unmapped_read_long, unmapped_write_long,
  1552         unmapped_read_burst, unmapped_write_burst,
  1553         ccn_storequeue_prefetch, unmapped_read_long }; 
  1555 struct mem_region_fn p4_region_storequeue_miss = { 
  1556         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1557         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1558         unmapped_read_long, unmapped_write_long,
  1559         unmapped_read_burst, unmapped_write_burst,
  1560         (mem_prefetch_fn_t)tlb_miss_read, unmapped_read_long }; 
  1562 struct mem_region_fn p4_region_storequeue_multihit = { 
  1563         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1564         unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
  1565         unmapped_read_long, unmapped_write_long,
  1566         unmapped_read_burst, unmapped_write_burst,
  1567         (mem_prefetch_fn_t)tlb_multi_hit_read, unmapped_read_long }; 
  1569 struct mem_region_fn p4_region_storequeue_protected = {
  1570         ccn_storequeue_read_long, ccn_storequeue_write_long,
  1571         unmapped_read_long, unmapped_write_long,
  1572         unmapped_read_long, unmapped_write_long,
  1573         unmapped_read_burst, unmapped_write_burst,
  1574         (mem_prefetch_fn_t)tlb_protected_read, unmapped_read_long };
  1576 struct mem_region_fn p4_region_storequeue_sqmd = {
  1577         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1578         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1579         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1580         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1581         (mem_prefetch_fn_t)address_error_read, (mem_read_fn_t)address_error_read_for_write };
  1583 struct mem_region_fn p4_region_storequeue_sqmd_miss = { 
  1584         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1585         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1586         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1587         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1588         (mem_prefetch_fn_t)tlb_miss_read, (mem_read_fn_t)address_error_read_for_write }; 
  1590 struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
  1591         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1592         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1593         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1594         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1595         (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)address_error_read_for_write };
  1597 struct mem_region_fn p4_region_storequeue_sqmd_protected = {
  1598         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1599         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1600         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1601         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
  1602         (mem_prefetch_fn_t)tlb_protected_read, (mem_read_fn_t)address_error_read_for_write };
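/* Editor's sketch (not part of the original file, left disabled): one plausible
 * way the store-queue tables above could be chosen from MMUCR.SQMD and the
 * TLB lookup result for the SQ target page.  The helper name and the
 * sq_tlb_result_t enum are hypothetical; the real selection logic lives
 * elsewhere in this module. */
#if 0
typedef enum { SQ_TLB_HIT, SQ_TLB_MISS, SQ_TLB_MULTIHIT, SQ_TLB_PROTECTED } sq_tlb_result_t; /* hypothetical */

static mem_region_fn_t select_user_storequeue_region( gboolean sqmd_user_protected,
                                                      sq_tlb_result_t result )
{
    if( sqmd_user_protected ) {
        /* MMUCR.SQMD=1: the user-mode SQ access itself is an address error,
         * but the prefetch slot still reflects the TLB state of the target. */
        switch( result ) {
        case SQ_TLB_MISS:      return &p4_region_storequeue_sqmd_miss;
        case SQ_TLB_MULTIHIT:  return &p4_region_storequeue_sqmd_multihit;
        case SQ_TLB_PROTECTED: return &p4_region_storequeue_sqmd_protected;
        default:               return &p4_region_storequeue_sqmd;
        }
    } else {
        switch( result ) {
        case SQ_TLB_MISS:      return &p4_region_storequeue_miss;
        case SQ_TLB_MULTIHIT:  return &p4_region_storequeue_multihit;
        case SQ_TLB_PROTECTED: return &p4_region_storequeue_protected;
        default:               return &p4_region_storequeue;
        }
    }
}
#endif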