lxdream.org :: lxdream/src/sh4/mmu.c
filename src/sh4/mmu.c
changeset 939:6f2302afeb89
prev 934:3acd3b3ee6d1
next 943:9a277733eafa
author nkeynes
date Sat Jan 03 08:55:15 2009 +0000
branch lxdream-mem
permissions -rw-r--r--
last change Implement CORE_EXIT_EXCEPTION for use when direct frame messing about doesn't work
     1 /**
     2  * $Id$
     3  *
     4  * SH4 MMU implementation based on address space page maps. This module
     5  * is responsible for all address decoding functions. 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    19 #define MODULE sh4_module
    21 #include <stdio.h>
    22 #include <assert.h>
    23 #include "sh4/sh4mmio.h"
    24 #include "sh4/sh4core.h"
    25 #include "sh4/sh4trans.h"
    26 #include "dreamcast.h"
    27 #include "mem.h"
    28 #include "mmu.h"
    30 #define RAISE_TLB_ERROR(code, vpn) \
    31     MMIO_WRITE(MMU, TEA, vpn); \
    32     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    33     sh4_raise_tlb_exception(code);
    34 #define RAISE_MEM_ERROR(code, vpn) \
    35     MMIO_WRITE(MMU, TEA, vpn); \
    36     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    37     sh4_raise_exception(code);
    38 #define RAISE_TLB_MULTIHIT_ERROR(vpn) \
    39     sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    40     MMIO_WRITE(MMU, TEA, vpn); \
    41     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
    43 /* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
    44 #define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
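/* Illustrative sketch (not part of the original mmu.c): the membership test in
 * IS_1K_PAGE_ENTRY above works by pointer difference against the pool base. The
 * names demo_entry, demo_pool and demo_is_pool_entry below are hypothetical, and
 * the block is disabled with #if 0 so it is not part of the module; like the
 * macro itself it assumes the platform tolerates subtracting unrelated pointers.
 */
#if 0
#include <stdint.h>
#define DEMO_POOL_SIZE 64
struct demo_entry { int dummy; };
static struct demo_entry demo_pool[DEMO_POOL_SIZE];

static int demo_is_pool_entry( void *p )
{
    /* For a pointer into demo_pool the difference is a small index (0..63);
     * for any other pointer the cast to uintptr_t yields a huge value, so a
     * single unsigned comparison covers both "below" and "above" the array. */
    return ((uintptr_t)((struct demo_entry *)p - &demo_pool[0])) < DEMO_POOL_SIZE;
}
#endif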
    46 /* Primary address space (used directly by SH4 cores) */
    47 mem_region_fn_t *sh4_address_space;
    48 mem_region_fn_t *sh4_user_address_space;
    50 /* MMU-mapped storequeue targets. Only used with TLB on */
    51 mem_region_fn_t *storequeue_address_space; 
    52 mem_region_fn_t *storequeue_user_address_space; 
    54 /* Accessed from the UTLB accessor methods */
    55 uint32_t mmu_urc;
    56 uint32_t mmu_urb;
    58 /* Module globals */
    59 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
    60 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
    61 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
    62 static uint32_t mmu_lrui;
    63 static uint32_t mmu_asid; // current asid
    65 /* Structures for 1K page handling */
    66 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
    67 static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
    68 static int mmu_utlb_1k_free_index;
    71 /* Function prototypes */
    72 static void mmu_invalidate_tlb();
    73 static void mmu_utlb_register_all();
    74 static void mmu_utlb_remove_entry(int);
    75 static void mmu_utlb_insert_entry(int);
    76 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    77 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
    78 static void mmu_set_tlb_enabled( int tlb_on );
    79 static void mmu_set_tlb_asid( uint32_t asid );
    80 static void mmu_set_storequeue_protected( int protected );
    81 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
    82 static gboolean mmu_utlb_unmap_pages( gboolean unmap_user, sh4addr_t start_addr, int npages );
    83 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
    84 static void mmu_utlb_1k_init();
    85 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
    86 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
    88 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
    89 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
    90 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
    91 static uint32_t get_tlb_size_mask( uint32_t flags );
    92 static uint32_t get_tlb_size_pages( uint32_t flags );
    95 /*********************** Module public functions ****************************/
    97 /**
    98  * Allocate memory for the address space maps, and initialize them according
    99  * to the default (reset) values. (TLB is disabled by default)
   100  */
   102 void MMU_init()
   103 {
   104     sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   105     sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
   106     storequeue_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
   107     storequeue_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
   109     mmu_set_tlb_enabled(0);
   110     mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
   111     mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue);
   113     /* Setup P4 tlb/cache access regions */
   114     mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   115     mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
   116     mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
   117     mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
   118     mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
   119     mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
   120     mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
   121     mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
   122     mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
   123     mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
   124     mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
   126     /* Setup P4 control region */
   127     mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
   128     mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
   129     mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
   130     mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
   131     mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
   132     mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
   133     mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
   134     mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
   135     mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
   136     mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
   137     mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
   138     mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
   139     mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
   141     register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
   142     mmu_utlb_1k_init();
   144     /* Ensure the code regions are executable */
   145     mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
   146     mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
   147 }
   149 void MMU_reset()
   150 {
   151     mmio_region_MMU_write( CCR, 0 );
   152     mmio_region_MMU_write( MMUCR, 0 );
   153 }
   155 void MMU_save_state( FILE *f )
   156 {
   157     fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
   158     fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
   159     fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
   160     fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
   161     fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
   162     fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
   163 }
   165 int MMU_load_state( FILE *f )
   166 {
   167     if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
   168         return 1;
   169     }
   170     if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
   171         return 1;
   172     }
   173     if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
   174         return 1;
   175     }
    176     if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
   177         return 1;
   178     }
   179     if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
   180         return 1;
   181     }
   182     if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
   183         return 1;
   184     }
   186     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
   187     mmu_set_tlb_enabled(mmucr&MMUCR_AT);
   188     mmu_set_storequeue_protected(mmucr&MMUCR_SQMD);
   189     return 0;
   190 }
   192 /**
   193  * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
   194  * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
   195  */
   196 void MMU_ldtlb()
   197 {
   198     mmu_urc %= mmu_urb;
   199     if( mmu_utlb[mmu_urc].flags & TLB_VALID )
   200         mmu_utlb_remove_entry( mmu_urc );
   201     mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
   202     mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
   203     mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
   204     mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
   205     mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
   206     mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
   207     if( mmu_utlb[mmu_urc].flags & TLB_VALID )
   208         mmu_utlb_insert_entry( mmu_urc );
   209 }
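/* Worked example of the field extraction in MMU_ldtlb() above, using
 * hypothetical register values PTEH = 0x12345C07 and PTEL = 0x0C00015F:
 *   vpn   = PTEH & 0xFFFFFC00 = 0x12345C00   (virtual page number)
 *   asid  = PTEH & 0x000000FF = 0x07         (address space identifier)
 *   ppn   = PTEL & 0x1FFFFC00 = 0x0C000000   (physical page number)
 *   flags = PTEL & 0x1FF      = 0x15F        (valid/size/protection/dirty etc.)
 * The UTLB slot written is MMUCR.URC (mmu_urc), and the entry is only linked
 * into the address-space maps when its valid bit is set.
 */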
   212 MMIO_REGION_READ_FN( MMU, reg )
   213 {
   214     reg &= 0xFFF;
   215     switch( reg ) {
   216     case MMUCR:
   217         mmu_urc %= mmu_urb;
   218         return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
   219     default:
   220         return MMIO_READ( MMU, reg );
   221     }
   222 }
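/* Summary of the MMUCR packing implied by the shifts and masks in the handlers
 * here (the counter fields follow directly from this file; the single-bit flag
 * positions are the standard SH7750 layout and are defined elsewhere, not here):
 *   bits 31..26  LRUI  (mmu_lrui)
 *   bits 23..18  URB   (mmu_urb; a value of 0 is treated as 64 on write)
 *   bits 15..10  URC   (mmu_urc, wrapped modulo URB on read)
 *   bit  9       SQMD  (store queue protection)
 *   bit  8       SV    (single virtual mode)
 *   bit  2       TI    (TLB invalidate trigger)
 *   bit  0       AT    (address translation enable)
 * Only the bits kept by the 0x00000301 mask (AT, SV, SQMD) are stored back in
 * the MMIO register; the counters live in the mmu_* globals above.
 */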
   224 MMIO_REGION_WRITE_FN( MMU, reg, val )
   225 {
   226     uint32_t tmp;
   227     reg &= 0xFFF;
   228     switch(reg) {
   229     case SH4VER:
   230         return;
   231     case PTEH:
   232         val &= 0xFFFFFCFF;
   233         if( (val & 0xFF) != mmu_asid ) {
   234             mmu_set_tlb_asid( val&0xFF );
   235             sh4_icache.page_vma = -1; // invalidate icache as asid has changed
   236         }
   237         break;
   238     case PTEL:
   239         val &= 0x1FFFFDFF;
   240         break;
   241     case PTEA:
   242         val &= 0x0000000F;
   243         break;
   244     case TRA:
   245         val &= 0x000003FC;
   246         break;
   247     case EXPEVT:
   248     case INTEVT:
   249         val &= 0x00000FFF;
   250         break;
   251     case MMUCR:
   252         if( val & MMUCR_TI ) {
   253             mmu_invalidate_tlb();
   254         }
   255         mmu_urc = (val >> 10) & 0x3F;
   256         mmu_urb = (val >> 18) & 0x3F;
   257         if( mmu_urb == 0 ) {
   258             mmu_urb = 0x40;
   259         }
   260         mmu_lrui = (val >> 26) & 0x3F;
   261         val &= 0x00000301;
   262         tmp = MMIO_READ( MMU, MMUCR );
   263         if( (val ^ tmp) & (MMUCR_SQMD) ) {
   264             mmu_set_storequeue_protected( val & MMUCR_SQMD );
   265         }
   266         if( (val ^ tmp) & (MMUCR_AT) ) {
   267             // AT flag has changed state - flush the xlt cache as all bets
   268             // are off now. We also need to force an immediate exit from the
   269             // current block
   270             mmu_set_tlb_enabled( val & MMUCR_AT );
   271             MMIO_WRITE( MMU, MMUCR, val );
   272             sh4_flush_icache();
   273         }
   274         break;
   275     case CCR:
   276         CCN_set_cache_control( val );
   277         val &= 0x81A7;
   278         break;
   279     case MMUUNK1:
   280         /* Note that if the high bit is set, this appears to reset the machine.
   281          * Not emulating this behaviour yet until we know why...
   282          */
   283         val &= 0x00010007;
   284         break;
   285     case QACR0:
   286     case QACR1:
   287         val &= 0x0000001C;
   288         break;
   289     case PMCR1:
   290         PMM_write_control(0, val);
   291         val &= 0x0000C13F;
   292         break;
   293     case PMCR2:
   294         PMM_write_control(1, val);
   295         val &= 0x0000C13F;
   296         break;
   297     default:
   298         break;
   299     }
   300     MMIO_WRITE( MMU, reg, val );
   301 }
   303 /********************** 1K Page handling ***********************/
   304 /* Since we use 4K pages as our native page size, 1K pages need a bit of extra
   305  * effort to manage - we justify this on the basis that most programs won't
   306  * actually use 1K pages, so we may as well optimize for the common case.
   307  * 
   308  * Implementation uses an intermediate page entry (the utlb_1k_entry) that
   309  * redirects requests to the 'real' page entry. These are allocated on an
   310  * as-needed basis, and returned to the pool when all subpages are empty.
   311  */ 
   312 static void mmu_utlb_1k_init()
   313 {
   314     int i;
   315     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   316         mmu_utlb_1k_free_list[i] = i;
   317         mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
   318     }
   319     mmu_utlb_1k_free_index = 0;
   320 }
   322 static struct utlb_1k_entry *mmu_utlb_1k_alloc()
   323 {
   324     assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
    325     struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
   326     return entry;
   327 }    
   329 static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
   330 {
   331     unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
   332     assert( entryNo < UTLB_ENTRY_COUNT );
   333     assert( mmu_utlb_1k_free_index > 0 );
   334     mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
   335 }
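/* Illustrative walk-through of the free-list discipline used by the two helpers
 * above (entry numbers are hypothetical):
 *   after mmu_utlb_1k_init():  free_list = {0,1,2,...}, free_index = 0
 *   alloc()                 -> entry free_list[0] (#0),  free_index = 1
 *   alloc()                 -> entry free_list[1] (#1),  free_index = 2
 *   free(&pages[0])         -> free_index = 1, free_list[1] = 0
 *   alloc()                 -> entry free_list[1] (#0),  free_index = 2
 * i.e. the most recently released entry is the next one handed out, and
 * free_index counts how many 1K entries are currently in use.
 */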
   338 /********************** Address space maintenance *************************/
   340 /**
   341  * MMU accessor functions just increment URC - fixup here if necessary
   342  */
   343 static inline void mmu_urc_fixup()
   344 {
   345    mmu_urc %= mmu_urb; 
   346 }
   348 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   349 {
   350     int count = (end - start) >> 12;
   351     mem_region_fn_t *ptr = &sh4_address_space[start>>12];
   352     while( count-- > 0 ) {
   353         *ptr++ = fn;
   354     }
   355 }
   356 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
   357 {
   358     int count = (end - start) >> 12;
   359     mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
   360     while( count-- > 0 ) {
   361         *ptr++ = fn;
   362     }
   363 }
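/* Worked example of the page-count arithmetic in the two registration helpers
 * above. The maps hold one mem_region_fn_t per 4K page:
 *   mmu_register_mem_region( 0xE0000000, 0xE4000000, fn )
 *       count = (0xE4000000 - 0xE0000000) >> 12 = 0x4000 pages  (64MB region)
 *   mmu_register_user_mem_region( 0x80000000, 0x00000000, fn )   (see MMU_init)
 *       count = (0x00000000 - 0x80000000) >> 12 = 0x80000 pages, i.e. the
 *       uint32_t subtraction wraps, so an end address of 0 means "up to the
 *       top of the 4GB address space".
 */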
   365 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
   366 {
   367     int i;
   368     if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
   369         /* TLB on */
   370         sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
   371         sh4_address_space[(page|0xA0000000)>>12] = fn;
   372         /* Scan UTLB and update any direct-referencing entries */
   373     } else {
   374         /* Direct map to U0, P0, P1, P2, P3 */
   375         for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
   376             sh4_address_space[(page|i)>>12] = fn;
   377         }
   378         for( i=0; i < 0x80000000; i+= 0x20000000 ) {
   379             sh4_user_address_space[(page|i)>>12] = fn;
   380         }
   381     }
   382 }
   384 static void mmu_set_tlb_enabled( int tlb_on )
   385 {
   386     mem_region_fn_t *ptr, *uptr;
   387     int i;
   389     if( tlb_on ) {
   390         mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   391         mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
   392         mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
   393         for( i=0, ptr = storequeue_address_space, uptr = storequeue_user_address_space; 
   394              i<0x04000000; i+= LXDREAM_PAGE_SIZE ) {
   395             *ptr++ = &mem_region_tlb_miss;
   396             *uptr++ = &mem_region_tlb_miss;
   397         }
   398         mmu_utlb_register_all();
   399     } else {
   400         for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   401             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   402         }
   403         for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
   404             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
   405         }
   406     }
   407 }
   409 static void mmu_set_storequeue_protected( int protected ) 
   410 {
   411     if( protected ) {
   412         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &mem_region_address_error );
   413     } else {
   414         mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
   415     }
   416 }
   418 static void mmu_set_tlb_asid( uint32_t asid )
   419 {
   420     /* Scan for pages that need to be remapped */
   421     int i;
   422     if( IS_SV_ENABLED() ) {
   423         // FIXME: Priv pages don't change - only user pages are mapped in/out 
   424         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   425             if( mmu_utlb[i].flags & TLB_VALID ) {
   426                 if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
   427                     if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
   428                         mmu_utlb_unmap_pages( TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   429                                 get_tlb_size_pages(mmu_utlb[i].flags) );
   430                     } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
   431                         mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn, 
   432                                 mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   433                                 get_tlb_size_pages(mmu_utlb[i].flags) );  
   434                     }
   435                 }
   436             }
   437         }
   438     } else {
   439         // Remap both Priv+user pages
   440         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   441             if( mmu_utlb[i].flags & TLB_VALID ) {
   442                 if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
   443                     if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
   444                         mmu_utlb_unmap_pages( TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
   445                                 get_tlb_size_pages(mmu_utlb[i].flags) );
   446                     } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
   447                         mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn, 
   448                                 mmu_utlb[i].vpn&mmu_utlb[i].mask, 
   449                                 get_tlb_size_pages(mmu_utlb[i].flags) );  
   450                     }
   451                 }
   452             }
   453         }
   454     }
   456     mmu_asid = asid;
   457 }
   459 static uint32_t get_tlb_size_mask( uint32_t flags )
   460 {
   461     switch( flags & TLB_SIZE_MASK ) {
   462     case TLB_SIZE_1K: return MASK_1K;
   463     case TLB_SIZE_4K: return MASK_4K;
   464     case TLB_SIZE_64K: return MASK_64K;
   465     case TLB_SIZE_1M: return MASK_1M;
   466     default: return 0; /* Unreachable */
   467     }
   468 }
   469 static uint32_t get_tlb_size_pages( uint32_t flags )
   470 {
   471     switch( flags & TLB_SIZE_MASK ) {
   472     case TLB_SIZE_1K: return 0;
   473     case TLB_SIZE_4K: return 1;
   474     case TLB_SIZE_64K: return 16;
   475     case TLB_SIZE_1M: return 256;
   476     default: return 0; /* Unreachable */
   477     }
   478 }
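/* The two helpers above express each SH4 page size both as an address mask and
 * as a count of the emulator's native 4K pages:
 *   TLB_SIZE_1K  -> MASK_1K,  0 native pages (handled via the 1K subpage entries)
 *   TLB_SIZE_4K  -> MASK_4K,  1 native page
 *   TLB_SIZE_64K -> MASK_64K, 16 native pages
 *   TLB_SIZE_1M  -> MASK_1M,  256 native pages
 * The MASK_* constants are assumed to be the usual top-bit masks for those sizes
 * (e.g. MASK_64K == 0xFFFF0000); they are defined in mmu.h, not in this file.
 */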
   480 /**
   481  * Add a new TLB entry mapping to the address space table. If any of the pages
   482  * are already mapped, they are mapped to the TLB multi-hit page instead.
   483  * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
   484  */ 
   485 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
   486 {
   487     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   488     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   489     gboolean mapping_ok = TRUE;
   490     int i;
   492     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   493         /* Storequeue mapping */
   494         ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
   495         uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
   496     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   497         user_page = NULL; /* No user access to P3 region */
   498     } else if( start_addr >= 0x80000000 ) {
   499         return TRUE; // No mapping - legal but meaningless
   500     }
   502     if( npages == 0 ) {
   503         struct utlb_1k_entry *ent;
   504         int i, idx = (start_addr >> 10) & 0x03;
   505         if( IS_1K_PAGE_ENTRY(*ptr) ) {
   506             ent = (struct utlb_1k_entry *)*ptr;
   507         } else {
   508             ent = mmu_utlb_1k_alloc();
   509             /* New 1K struct - init to previous contents of region */
   510             for( i=0; i<4; i++ ) {
   511                 ent->subpages[i] = *ptr;
   512                 ent->user_subpages[i] = *uptr;
   513             }
   514             *ptr = &ent->fn;
   515             *uptr = &ent->user_fn;
   516         }
   518         if( priv_page != NULL ) {
   519             if( ent->subpages[idx] == &mem_region_tlb_miss ) {
   520                 ent->subpages[idx] = priv_page;
   521             } else {
   522                 mapping_ok = FALSE;
   523                 ent->subpages[idx] = &mem_region_tlb_multihit;
   524             }
   525         }
   526         if( user_page != NULL ) {
   527             if( ent->user_subpages[idx] == &mem_region_tlb_miss ) {
   528                 ent->user_subpages[idx] = user_page;
   529             } else {
   530                 mapping_ok = FALSE;
   531                 ent->user_subpages[idx] = &mem_region_tlb_multihit;
   532             }
   533         }
   535     } else {
   537         if( user_page == NULL ) {
   538             /* Privileged mapping only */
   539             for( i=0; i<npages; i++ ) {
   540                 if( *ptr == &mem_region_tlb_miss ) {
   541                     *ptr++ = priv_page;
   542                 } else {
   543                     mapping_ok = FALSE;
   544                     *ptr++ = &mem_region_tlb_multihit;
   545                 }
   546             }
   547         } else if( priv_page == NULL ) {
   548             /* User mapping only (eg ASID change remap) */
   549             for( i=0; i<npages; i++ ) {
   550                 if( *uptr == &mem_region_tlb_miss ) {
   551                     *uptr++ = user_page;
   552                 } else {
   553                     mapping_ok = FALSE;
   554                     *uptr++ = &mem_region_tlb_multihit;
   555                 }
   556             }        
   557         } else {
   558             for( i=0; i<npages; i++ ) {
   559                 if( *ptr == &mem_region_tlb_miss ) {
   560                     *ptr++ = priv_page;
   561                     *uptr++ = user_page;
   562                 } else {
   563                     mapping_ok = FALSE;
   564                     *ptr++ = &mem_region_tlb_multihit;
   565                     *uptr++ = &mem_region_tlb_multihit;
   566                 }
   567             }
   568         }
   569     }
   570     return mapping_ok;
   571 }
   573 /**
    574  * Remove a previous TLB mapping (replacing the pages with the TLB miss region).
   575  * @return FALSE if any pages were previously mapped to the TLB multihit page, 
   576  * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
   577  */
   578 static gboolean mmu_utlb_unmap_pages( gboolean unmap_user, sh4addr_t start_addr, int npages )
   579 {
   580     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   581     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   582     gboolean unmapping_ok = TRUE;
   583     int i;
   585     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
   586         /* Storequeue mapping */
   587         ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
   588         uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
   589     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
   590         unmap_user = FALSE;
   591     } else if( start_addr >= 0x80000000 ) {
   592         return TRUE; // No mapping - legal but meaningless
   593     }
   595     if( npages == 0 ) { // 1K page
   596         assert( IS_1K_PAGE_ENTRY( *ptr ) );
   597         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
   598         int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
   599         if( ent->subpages[idx] == &mem_region_tlb_multihit ) {
   600             unmapping_ok = FALSE;
   601         }
   602         ent->subpages[idx] = &mem_region_tlb_miss;
   603         ent->user_subpages[idx] = &mem_region_tlb_miss;
   605         /* If all 4 subpages have the same content, merge them together and
   606          * release the 1K entry
   607          */
   608         mem_region_fn_t priv_page = ent->subpages[0];
   609         mem_region_fn_t user_page = ent->user_subpages[0];
   610         for( i=1; i<4; i++ ) {
   611             if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
   612                 mergeable = 0;
   613                 break;
   614             }
   615         }
   616         if( mergeable ) {
   617             mmu_utlb_1k_free(ent);
   618             *ptr = priv_page;
   619             *uptr = user_page;
   620         }
   621     } else {
   622         if( !unmap_user ) {
   623             /* Privileged (un)mapping only */
   624             for( i=0; i<npages; i++ ) {
   625                 if( *ptr == &mem_region_tlb_multihit ) {
   626                     unmapping_ok = FALSE;
   627                 }
   628                 *ptr++ = &mem_region_tlb_miss;
   629             }
   630         } else {
   631             for( i=0; i<npages; i++ ) {
   632                 if( *ptr == &mem_region_tlb_multihit ) {
   633                     unmapping_ok = FALSE;
   634                 }
   635                 *ptr++ = &mem_region_tlb_miss;
   636                 *uptr++ = &mem_region_tlb_miss;
   637             }
   638         }
   639     }
   640     return unmapping_ok;
   641 }
   643 static void mmu_utlb_insert_entry( int entry )
   644 {
   645     struct utlb_entry *ent = &mmu_utlb[entry];
   646     mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
   647     mem_region_fn_t upage;
   648     sh4addr_t start_addr = ent->vpn & ent->mask;
   649     int npages = get_tlb_size_pages(ent->flags);
   651     if( (ent->flags & TLB_USERMODE) == 0 ) {
   652         upage = &mem_region_user_protected;
   653     } else {        
   654         upage = page;
   655     }
   656     mmu_utlb_pages[entry].user_fn = upage;
   658     if( (ent->flags & TLB_WRITABLE) == 0 ) {
   659         page->write_long = (mem_write_fn_t)tlb_protected_write;
   660         page->write_word = (mem_write_fn_t)tlb_protected_write;
   661         page->write_byte = (mem_write_fn_t)tlb_protected_write;
   662         page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
   663         mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   664     } else if( (ent->flags & TLB_DIRTY) == 0 ) {
   665         page->write_long = (mem_write_fn_t)tlb_initial_write;
   666         page->write_word = (mem_write_fn_t)tlb_initial_write;
   667         page->write_byte = (mem_write_fn_t)tlb_initial_write;
   668         page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
   669         mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
   670     } else {
   671         mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
   672     }
   674     /* Is page visible? */
   675     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) { 
   676         mmu_utlb_map_pages( page, upage, start_addr, npages );
   677     } else if( IS_SV_ENABLED() ) {
   678         mmu_utlb_map_pages( page, NULL, start_addr, npages );
   679     }
   680 }
   682 static void mmu_utlb_remove_entry( int entry )
   683 {
   684     int i, j;
   685     struct utlb_entry *ent = &mmu_utlb[entry];
   686     sh4addr_t start_addr = ent->vpn&ent->mask;
   687     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
   688     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
   689     gboolean unmap_user;
   690     int npages = get_tlb_size_pages(ent->flags);
   692     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
   693         unmap_user = TRUE;
   694     } else if( IS_SV_ENABLED() ) {
   695         unmap_user = FALSE;
   696     } else {
   697         return; // Not mapped
   698     }
   700     gboolean clean_unmap = mmu_utlb_unmap_pages( unmap_user, start_addr, npages );
   702     if( !clean_unmap ) {
   703         /* If we ran into a multi-hit, we now need to rescan the UTLB for the other entries
   704          * and remap them */
   705         for( j=0; j<UTLB_ENTRY_COUNT; j++ ) {
   706             uint32_t mask = MIN(mmu_utlb[j].mask, ent->mask);
    707             if( j != entry && (start_addr & mask) == (mmu_utlb[j].vpn & mask) ) {
    708                 mmu_utlb_insert_entry( j );
    709             }
   710         }
   711     }
   712 }
   714 static void mmu_utlb_register_all()
   715 {
   716     int i;
   717     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   718         if( mmu_utlb[i].flags & TLB_VALID ) 
   719             mmu_utlb_insert_entry( i );
   720     }
   721 }
   723 static void mmu_invalidate_tlb()
   724 {
   725     int i;
   726     for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
   727         mmu_itlb[i].flags &= (~TLB_VALID);
   728     }
   729     if( IS_TLB_ENABLED() ) {
   730         for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   731             if( mmu_utlb[i].flags & TLB_VALID ) {
   732                 mmu_utlb_remove_entry( i );
   733             }
   734         }
   735     }
   736     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   737         mmu_utlb[i].flags &= (~TLB_VALID);
   738     }
   739 }
   741 /******************************************************************************/
   742 /*                        MMU TLB address translation                         */
   743 /******************************************************************************/
   745 /**
   746  * Translate a 32-bit address into a UTLB entry number. Does not check for
   747  * page protection etc.
   748  * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
   749  */
   750 int mmu_utlb_entry_for_vpn( uint32_t vpn )
   751 {
   752     mem_region_fn_t fn = sh4_address_space[vpn>>12];
   753     if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
   754         return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
   755     } else if( fn == &mem_region_tlb_multihit ) {
   756         return -2;
   757     } else {
   758         return -1;
   759     }
   760 }
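/* Minimal usage sketch for mmu_utlb_entry_for_vpn() above (hypothetical caller,
 * not part of the original file; disabled with #if 0):
 */
#if 0
static void demo_probe( uint32_t vpn )
{
    int entryNo = mmu_utlb_entry_for_vpn( vpn );
    if( entryNo == -2 ) {
        /* vpn currently resolves to the multi-hit region (2+ matching entries) */
    } else if( entryNo == -1 ) {
        /* no UTLB-backed mapping - a real access would take the tlb_miss path */
    } else {
        /* entryNo indexes mmu_utlb[] / mmu_utlb_pages[] (0..UTLB_ENTRY_COUNT-1) */
    }
}
#endif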
   763 /**
   764  * Perform the actual utlb lookup w/ asid matching.
    765  * Possible outcomes are:
   766  *   0..63 Single match - good, return entry found
   767  *   -1 No match - raise a tlb data miss exception
   768  *   -2 Multiple matches - raise a multi-hit exception (reset)
   769  * @param vpn virtual address to resolve
   770  * @return the resultant UTLB entry, or an error.
   771  */
   772 static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
   773 {
   774     int result = -1;
   775     unsigned int i;
   777     mmu_urc++;
   778     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   779         mmu_urc = 0;
   780     }
   782     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   783         if( (mmu_utlb[i].flags & TLB_VALID) &&
   784                 ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
   785                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   786             if( result != -1 ) {
   787                 return -2;
   788             }
   789             result = i;
   790         }
   791     }
   792     return result;
   793 }
   795 /**
   796  * Perform the actual utlb lookup matching on vpn only
    797  * Possible outcomes are:
   798  *   0..63 Single match - good, return entry found
   799  *   -1 No match - raise a tlb data miss exception
   800  *   -2 Multiple matches - raise a multi-hit exception (reset)
   801  * @param vpn virtual address to resolve
   802  * @return the resultant UTLB entry, or an error.
   803  */
   804 static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
   805 {
   806     int result = -1;
   807     unsigned int i;
   809     mmu_urc++;
   810     if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
   811         mmu_urc = 0;
   812     }
   814     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   815         if( (mmu_utlb[i].flags & TLB_VALID) &&
   816                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   817             if( result != -1 ) {
   818                 return -2;
   819             }
   820             result = i;
   821         }
   822     }
   824     return result;
   825 }
   827 /**
   828  * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
   829  * @return the number (0-3) of the replaced entry.
   830  */
   831 static int inline mmu_itlb_update_from_utlb( int entryNo )
   832 {
   833     int replace;
   834     /* Determine entry to replace based on lrui */
   835     if( (mmu_lrui & 0x38) == 0x38 ) {
   836         replace = 0;
   837         mmu_lrui = mmu_lrui & 0x07;
   838     } else if( (mmu_lrui & 0x26) == 0x06 ) {
   839         replace = 1;
   840         mmu_lrui = (mmu_lrui & 0x19) | 0x20;
   841     } else if( (mmu_lrui & 0x15) == 0x01 ) {
   842         replace = 2;
   843         mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
   844     } else { // Note - gets invalid entries too
   845         replace = 3;
   846         mmu_lrui = (mmu_lrui | 0x0B);
   847     }
   849     mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
   850     mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
   851     mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
   852     mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
   853     mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
   854     return replace;
   855 }
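/* The LRUI tests above encode the relative age of the four ITLB entries in the
 * 6-bit LRUI field, as used consistently in this file:
 *   (lrui & 0x38) == 0x38  -> entry 0 is least recently used, so it is replaced
 *   (lrui & 0x26) == 0x06  -> entry 1 is least recently used
 *   (lrui & 0x15) == 0x01  -> entry 2 is least recently used
 *   anything else          -> entry 3 is replaced (this also absorbs encodings
 *                             that do not occur in normal operation)
 * After the replacement, the same masks used by the lookup functions below mark
 * the chosen entry as most recently used.
 */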
   857 /**
   858  * Perform the actual itlb lookup w/ asid protection
    859  * Possible outcomes are:
   860  *   0..63 Single match - good, return entry found
   861  *   -1 No match - raise a tlb data miss exception
   862  *   -2 Multiple matches - raise a multi-hit exception (reset)
   863  * @param vpn virtual address to resolve
   864  * @return the resultant ITLB entry, or an error.
   865  */
   866 static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
   867 {
   868     int result = -1;
   869     unsigned int i;
   871     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   872         if( (mmu_itlb[i].flags & TLB_VALID) &&
   873                 ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
   874                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   875             if( result != -1 ) {
   876                 return -2;
   877             }
   878             result = i;
   879         }
   880     }
   882     if( result == -1 ) {
   883         int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
   884         if( utlbEntry < 0 ) {
   885             return utlbEntry;
   886         } else {
   887             return mmu_itlb_update_from_utlb( utlbEntry );
   888         }
   889     }
   891     switch( result ) {
   892     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
   893     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
   894     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
   895     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   896     }
   898     return result;
   899 }
   901 /**
   902  * Perform the actual itlb lookup on vpn only
    903  * Possible outcomes are:
   904  *   0..63 Single match - good, return entry found
   905  *   -1 No match - raise a tlb data miss exception
   906  *   -2 Multiple matches - raise a multi-hit exception (reset)
   907  * @param vpn virtual address to resolve
   908  * @return the resultant ITLB entry, or an error.
   909  */
   910 static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
   911 {
   912     int result = -1;
   913     unsigned int i;
   915     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   916         if( (mmu_itlb[i].flags & TLB_VALID) &&
   917                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   918             if( result != -1 ) {
   919                 return -2;
   920             }
   921             result = i;
   922         }
   923     }
   925     if( result == -1 ) {
   926         int utlbEntry = mmu_utlb_lookup_vpn( vpn );
   927         if( utlbEntry < 0 ) {
   928             return utlbEntry;
   929         } else {
   930             return mmu_itlb_update_from_utlb( utlbEntry );
   931         }
   932     }
   934     switch( result ) {
   935     case 0: mmu_lrui = (mmu_lrui & 0x07); break;
   936     case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
   937     case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
   938     case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
   939     }
   941     return result;
   942 }
   944 /**
   945  * Update the icache for an untranslated address
   946  */
   947 static inline void mmu_update_icache_phys( sh4addr_t addr )
   948 {
   949     if( (addr & 0x1C000000) == 0x0C000000 ) {
   950         /* Main ram */
   951         sh4_icache.page_vma = addr & 0xFF000000;
   952         sh4_icache.page_ppa = 0x0C000000;
   953         sh4_icache.mask = 0xFF000000;
   954         sh4_icache.page = dc_main_ram;
   955     } else if( (addr & 0x1FE00000) == 0 ) {
   956         /* BIOS ROM */
   957         sh4_icache.page_vma = addr & 0xFFE00000;
   958         sh4_icache.page_ppa = 0;
   959         sh4_icache.mask = 0xFFE00000;
   960         sh4_icache.page = dc_boot_rom;
   961     } else {
   962         /* not supported */
   963         sh4_icache.page_vma = -1;
   964     }
   965 }
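/* The physical decode above corresponds to the Dreamcast memory map as used
 * elsewhere in lxdream:
 *   (addr & 0x1C000000) == 0x0C000000  -> system RAM (dc_main_ram), cached as a
 *                                         16MB page via the 0xFF000000 mask
 *   (addr & 0x1FE00000) == 0x00000000  -> boot ROM (dc_boot_rom), a 2MB page
 * Anything else leaves the icache invalidated (page_vma = -1), forcing a full
 * lookup on the next instruction fetch.
 */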
   967 /**
   968  * Update the sh4_icache structure to describe the page(s) containing the
   969  * given vma. If the address does not reference a RAM/ROM region, the icache
   970  * will be invalidated instead.
   971  * If AT is on, this method will raise TLB exceptions normally
   972  * (hence this method should only be used immediately prior to execution of
   973  * code), and otherwise will set the icache according to the matching TLB entry.
   974  * If AT is off, this method will set the entire referenced RAM/ROM region in
   975  * the icache.
   976  * @return TRUE if the update completed (successfully or otherwise), FALSE
   977  * if an exception was raised.
   978  */
   979 gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
   980 {
   981     int entryNo;
   982     if( IS_SH4_PRIVMODE()  ) {
   983         if( addr & 0x80000000 ) {
   984             if( addr < 0xC0000000 ) {
   985                 /* P1, P2 and P4 regions are pass-through (no translation) */
   986                 mmu_update_icache_phys(addr);
   987                 return TRUE;
   988             } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
   989                 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   990                 return FALSE;
   991             }
   992         }
   994         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
   995         if( (mmucr & MMUCR_AT) == 0 ) {
   996             mmu_update_icache_phys(addr);
   997             return TRUE;
   998         }
  1000         if( (mmucr & MMUCR_SV) == 0 )
  1001         	entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1002         else
  1003         	entryNo = mmu_itlb_lookup_vpn( addr );
  1004     } else {
  1005         if( addr & 0x80000000 ) {
  1006             RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1007             return FALSE;
   1008         }
  1010         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1011         if( (mmucr & MMUCR_AT) == 0 ) {
  1012             mmu_update_icache_phys(addr);
   1013             return TRUE;
   1014         }
  1016         entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1018         if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
  1019             RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1020             return FALSE;
   1021         }
   1022     }
  1024     switch(entryNo) {
  1025     case -1:
  1026     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
  1027     return FALSE;
  1028     case -2:
  1029     RAISE_TLB_MULTIHIT_ERROR(addr);
  1030     return FALSE;
  1031     default:
  1032         sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
  1033         sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
  1034         if( sh4_icache.page == NULL ) {
  1035             sh4_icache.page_vma = -1;
  1036         } else {
  1037             sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
   1038             sh4_icache.mask = mmu_itlb[entryNo].mask;
   1039         }
   1040         return TRUE;
   1041     }
   1042 }
  1044 /**
  1045  * Translate address for disassembly purposes (ie performs an instruction
  1046  * lookup) - does not raise exceptions or modify any state, and ignores
  1047  * protection bits. Returns the translated address, or MMU_VMA_ERROR
  1048  * on translation failure.
  1049  */
   1050 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
   1051 {
  1052     if( vma & 0x80000000 ) {
  1053         if( vma < 0xC0000000 ) {
  1054             /* P1, P2 and P4 regions are pass-through (no translation) */
  1055             return VMA_TO_EXT_ADDR(vma);
  1056         } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
  1057             /* Not translatable */
   1058             return MMU_VMA_ERROR;
   1059         }
   1060     }
  1062     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1063     if( (mmucr & MMUCR_AT) == 0 ) {
   1064         return VMA_TO_EXT_ADDR(vma);
   1065     }
  1067     int entryNo = mmu_itlb_lookup_vpn( vma );
  1068     if( entryNo == -2 ) {
   1069         entryNo = mmu_itlb_lookup_vpn_asid( vma );
   1070     }
  1071     if( entryNo < 0 ) {
  1072         return MMU_VMA_ERROR;
  1073     } else {
  1074         return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
   1075         (vma & (~mmu_itlb[entryNo].mask));
   1076     }
   1077 }
   1079 void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
   1080 {
  1081     int queue = (addr&0x20)>>2;
  1082     uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
  1083     sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
  1084     sh4addr_t target = (addr&0x03FFFFE0) | hi;
   1085     ext_address_space[target>>12]->write_burst( target, src );
   1086 }
   1088 void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc )
   1089 {
  1090     int queue = (addr&0x20)>>2;
  1091     sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
  1092     sh4addr_t target;
  1093     /* Store queue operation */
   1094     storequeue_address_space[(addr&0x03FFFFE0)>>12]->write_burst( addr, src);
   1095 }
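/* Worked example of the non-MMU store queue flush above (sh4_flush_store_queue),
 * with hypothetical values. For a prefetch at addr = 0xE0000020 with QACR1 = 0x0C:
 *   queue  = (addr & 0x20) >> 2          = 8           (SQ1 starts at word 8)
 *   hi     = MMIO_READ(MMU, QACR1) << 24 = 0x0C000000
 *   target = (addr & 0x03FFFFE0) | hi    = 0x0C000020
 * so address bits 25..5 select the 32-byte block and QACR supplies the top bits
 * of the external address. The MMU variant instead routes the burst through
 * storequeue_address_space, which holds the TLB-mapped targets when address
 * translation is enabled.
 */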
  1097 /********************** TLB Direct-Access Regions ***************************/
  1098 #ifdef HAVE_FRAME_ADDRESS
  1099 #define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
  1100 #else
  1101 #define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
  1102 #endif
  1105 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
   1107 int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
   1108 {
   1109     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1110     return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
   1111 }
   1113 void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
   1114 {
   1115     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1116     ent->vpn = val & 0xFFFFFC00;
   1117     ent->asid = val & 0x000000FF;
   1118     ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
   1119 }
   1121 int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
   1122 {
   1123     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1124     return (ent->ppn & 0x1FFFFC00) | ent->flags;
   1125 }
   1127 void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
   1128 {
   1129     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   1130     ent->ppn = val & 0x1FFFFC00;
   1131     ent->flags = val & 0x00001DA;
   1132     ent->mask = get_tlb_size_mask(val);
   1133     if( ent->ppn >= 0x1C000000 )
   1134         ent->ppn |= 0xE0000000;
   1135 }
  1137 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
  1138 #define UTLB_ASSOC(addr) (addr&0x80)
  1139 #define UTLB_DATA2(addr) (addr&0x00800000)
   1141 int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
   1142 {
   1143     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   1144     return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
   1145     ((ent->flags & TLB_DIRTY)<<7);
   1146 }
   1147 int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
   1148 {
   1149     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   1150     if( UTLB_DATA2(addr) ) {
   1151         return ent->pcmcia;
   1152     } else {
   1153         return (ent->ppn&0x1FFFFC00) | ent->flags;
   1154     }
   1155 }
  1157 /**
  1158  * Find a UTLB entry for the associative TLB write - same as the normal
  1159  * lookup but ignores the valid bit.
  1160  */
   1161 static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   1162 {
   1163     int result = -1;
   1164     unsigned int i;
   1165     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   1166         if( (mmu_utlb[i].flags & TLB_VALID) &&
   1167                 ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
   1168                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   1169             if( result != -1 ) {
   1170                 fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
   1171                 return -2;
   1172             }
   1173             result = i;
   1174         }
   1175     }
   1176     return result;
   1177 }
  1179 /**
  1180  * Find a ITLB entry for the associative TLB write - same as the normal
  1181  * lookup but ignores the valid bit.
  1182  */
   1183 static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   1184 {
   1185     int result = -1;
   1186     unsigned int i;
   1187     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   1188         if( (mmu_itlb[i].flags & TLB_VALID) &&
   1189                 ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
   1190                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   1191             if( result != -1 ) {
   1192                 return -2;
   1193             }
   1194             result = i;
   1195         }
   1196     }
   1197     return result;
   1198 }
   1200 void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
   1201 {
  1202     if( UTLB_ASSOC(addr) ) {
  1203         int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
  1204         if( utlb >= 0 ) {
  1205             struct utlb_entry *ent = &mmu_utlb[utlb];
  1206             uint32_t old_flags = ent->flags;
  1207             ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
  1208             ent->flags |= (val & TLB_VALID);
  1209             ent->flags |= ((val & 0x200)>>7);
  1210             if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
  1211                 if( old_flags & TLB_VALID )
  1212                     mmu_utlb_remove_entry( utlb );
  1213                 if( ent->flags & TLB_VALID )
   1214                     mmu_utlb_insert_entry( utlb );
   1215             }
   1216         }
  1218         int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
  1219         if( itlb >= 0 ) {
  1220             struct itlb_entry *ent = &mmu_itlb[itlb];
   1221             ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
   1222         }
  1224         if( itlb == -2 || utlb == -2 ) {
  1225             RAISE_TLB_MULTIHIT_ERROR(addr);
  1226             EXCEPTION_EXIT();
   1227             return;
   1228         }
  1229     } else {
  1230         struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1231         if( ent->flags & TLB_VALID ) 
  1232             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1233         ent->vpn = (val & 0xFFFFFC00);
  1234         ent->asid = (val & 0xFF);
  1235         ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
  1236         ent->flags |= (val & TLB_VALID);
  1237         ent->flags |= ((val & 0x200)>>7);
  1238         if( ent->flags & TLB_VALID ) 
   1239             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   1240     }
   1241 }
   1243 void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
   1244 {
  1245     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
  1246     if( UTLB_DATA2(addr) ) {
  1247         ent->pcmcia = val & 0x0000000F;
  1248     } else {
  1249         if( ent->flags & TLB_VALID ) 
  1250             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
  1251         ent->ppn = (val & 0x1FFFFC00);
  1252         ent->flags = (val & 0x000001FF);
  1253         ent->mask = get_tlb_size_mask(val);
  1254         if( ent->flags & TLB_VALID ) 
   1255             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   1256     }
   1257 }
  1259 struct mem_region_fn p4_region_itlb_addr = {
  1260         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1261         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1262         mmu_itlb_addr_read, mmu_itlb_addr_write,
  1263         unmapped_read_burst, unmapped_write_burst };
  1264 struct mem_region_fn p4_region_itlb_data = {
  1265         mmu_itlb_data_read, mmu_itlb_data_write,
  1266         mmu_itlb_data_read, mmu_itlb_data_write,
  1267         mmu_itlb_data_read, mmu_itlb_data_write,
  1268         unmapped_read_burst, unmapped_write_burst };
  1269 struct mem_region_fn p4_region_utlb_addr = {
  1270         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1271         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1272         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
  1273         unmapped_read_burst, unmapped_write_burst };
  1274 struct mem_region_fn p4_region_utlb_data = {
  1275         mmu_utlb_data_read, mmu_utlb_data_write,
  1276         mmu_utlb_data_read, mmu_utlb_data_write,
  1277         mmu_utlb_data_read, mmu_utlb_data_write,
  1278         unmapped_read_burst, unmapped_write_burst };
  1280 /********************** Error regions **************************/
   1282 static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
   1283 {
   1284     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1285     EXCEPTION_EXIT();
   1286 }
   1288 static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1289 {
   1290     RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
   1291     EXCEPTION_EXIT();
   1292 }
   1294 static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
   1295 {
   1296     RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
   1297     EXCEPTION_EXIT();
   1298 }
   1300 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
   1301 {
   1302     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
   1303     EXCEPTION_EXIT();
   1304 }
   1306 static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1307 {
   1308     RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
   1309     EXCEPTION_EXIT();
   1310 }
   1312 static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
   1313 {
   1314     RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
   1315     EXCEPTION_EXIT();
   1316 }
   1318 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
   1319 {
   1320     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1321     EXCEPTION_EXIT();
   1322 }
   1324 static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1325 {
   1326     RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
   1327     EXCEPTION_EXIT();
   1328 }
   1330 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
   1331 {
   1332     RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
   1333     EXCEPTION_EXIT();
   1334 }
   1336 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
   1337 {
   1338     RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
   1339     EXCEPTION_EXIT();
   1340 }
   1342 static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
   1343 {
   1344     MMIO_WRITE(MMU, TEA, addr);
   1345     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
   1346     sh4_raise_reset(EXC_TLB_MULTI_HIT);
   1347     EXCEPTION_EXIT();
   1348 }
   1350 static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
   1351 {
   1352     MMIO_WRITE(MMU, TEA, addr);
   1353     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
   1354     sh4_raise_reset(EXC_TLB_MULTI_HIT);
   1355     EXCEPTION_EXIT();
   1356 }
   1357 static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
   1358 {
   1359     MMIO_WRITE(MMU, TEA, addr);
   1360     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
   1361     sh4_raise_reset(EXC_TLB_MULTI_HIT);
   1362     EXCEPTION_EXIT();
   1363 }
  1365 /**
  1366  * Note: Per sec 4.6.4 of the SH7750 manual, SQ 
  1367  */
  1368 struct mem_region_fn mem_region_address_error = {
  1369         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1370         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1371         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
  1372         (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write };
  1374 struct mem_region_fn mem_region_tlb_miss = {
  1375         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1376         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1377         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
  1378         (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write };
  1380 struct mem_region_fn mem_region_user_protected = {
  1381         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1382         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1383         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
  1384         (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write };
  1386 struct mem_region_fn mem_region_tlb_multihit = {
  1387         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1388         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1389         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
  1390         (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write };